diff --git a/Documentation/devicetree/bindings/rtc/rtc.yaml b/Documentation/devicetree/bindings/rtc/rtc.yaml index 8acd2de3de3adb..d30dc045aac648 100644 --- a/Documentation/devicetree/bindings/rtc/rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/rtc.yaml @@ -63,6 +63,11 @@ properties: description: Enables wake up of host system on alarm. + reset-source: + $ref: /schemas/types.yaml#/definitions/flag + description: + The RTC is able to reset the machine. + additionalProperties: true ... diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index b0ea17da8ff638..654649556306fe 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -273,6 +273,24 @@ Contact: Daniel Vetter, Noralf Tronnes Level: Advanced +Garbage collect fbdev scrolling acceleration +-------------------------------------------- + +Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = +SCROLL_REDRAW. There's a ton of code this will allow us to remove: +- lots of code in fbcon.c +- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called + directly instead of the function table (with a switch on p->rotate) +- fb_copyarea is unused after this, and can be deleted from all drivers + +Note that not all acceleration code can be deleted, since clearing and cursor +support is still accelerated, which might be good candidates for further +deletion projects. + +Contact: Daniel Vetter + +Level: Intermediate + idr_init_base() --------------- diff --git a/Makefile b/Makefile index 2e7d6670794ab2..1a03a53181b9a3 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 4 +SUBLEVEL = 6 EXTRAVERSION = NAME = Kleptomaniac Octopus diff --git a/arch/arm/configs/odroidxu4_defconfig b/arch/arm/configs/odroidxu4_defconfig index 661a8490d87a50..fd7bfade0724aa 100644 --- a/arch/arm/configs/odroidxu4_defconfig +++ b/arch/arm/configs/odroidxu4_defconfig @@ -1681,9 +1681,12 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # Firmware loader # CONFIG_FW_LOADER=y -CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="edid/1024x600.bin edid/1024x768.bin edid/1152x864_75hz.bin edid/1280x1024.bin edid/1280x720.bin edid/1280x768.bin edid/1280x800.bin edid/1360x768.bin edid/1366x768.bin edid/1400x1050.bin edid/1440x900.bin edid/1600x1200.bin edid/1600x900.bin edid/1680x1050.bin edid/1792x1344.bin edid/1920x1080_23_976hz.bin edid/1920x1080_24hz.bin edid/1920x1080_50hz.bin edid/1920x1080.bin edid/1920x1200_30hz.bin edid/1920x1200_60hz.bin edid/1920x800.bin edid/480x800.bin edid/640x480.bin edid/720x480.bin edid/720x576.bin edid/800x480.bin edid/800x600.bin edid/848x480.bin edid/480x272.bin" +CONFIG_EXTRA_FIRMWARE_DIR="firmware" # CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_CACHE=y # end of Firmware loader CONFIG_WANT_DEV_COREDUMP=y diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ef12e097f31845..27ca549ff47ed1 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), - args->nid, args->zone, page_to_pfn(map_start), + args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); return 0; } @@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - 
memmap_init_zone(size, nid, zone, start_pfn, + memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); } else { struct page *start; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 7d0f7682d01df6..6b1eca53e36cc8 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -102,14 +102,6 @@ static inline notrace unsigned long get_irq_happened(void) return happened; } -static inline notrace int decrementer_check_overflow(void) -{ - u64 now = get_tb(); - u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); - - return now >= *next_tb; -} - #ifdef CONFIG_PPC_BOOK3E /* This is called whenever we are re-enabling interrupts @@ -142,35 +134,6 @@ notrace unsigned int __check_irq_replay(void) trace_hardirqs_on(); trace_hardirqs_off(); - /* - * We are always hard disabled here, but PACA_IRQ_HARD_DIS may - * not be set, which means interrupts have only just been hard - * disabled as part of the local_irq_restore or interrupt return - * code. In that case, skip the decrementr check becaus it's - * expensive to read the TB. - * - * HARD_DIS then gets cleared here, but it's reconciled later. - * Either local_irq_disable will replay the interrupt and that - * will reconcile state like other hard interrupts. Or interrupt - * retur will replay the interrupt and in that case it sets - * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S). - */ - if (happened & PACA_IRQ_HARD_DIS) { - local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; - - /* - * We may have missed a decrementer interrupt if hard disabled. - * Check the decrementer register in case we had a rollover - * while hard disabled. - */ - if (!(happened & PACA_IRQ_DEC)) { - if (decrementer_check_overflow()) { - local_paca->irq_happened |= PACA_IRQ_DEC; - happened |= PACA_IRQ_DEC; - } - } - } - if (happened & PACA_IRQ_DEC) { local_paca->irq_happened &= ~PACA_IRQ_DEC; return 0x900; @@ -186,6 +149,9 @@ notrace unsigned int __check_irq_replay(void) return 0x280; } + if (happened & PACA_IRQ_HARD_DIS) + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + /* There should be nothing left ! */ BUG_ON(local_paca->irq_happened != 0); @@ -229,18 +195,6 @@ void replay_soft_interrupts(void) if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) WARN_ON_ONCE(mfmsr() & MSR_EE); - if (happened & PACA_IRQ_HARD_DIS) { - /* - * We may have missed a decrementer interrupt if hard disabled. - * Check the decrementer register in case we had a rollover - * while hard disabled. - */ - if (!(happened & PACA_IRQ_DEC)) { - if (decrementer_check_overflow()) - happened |= PACA_IRQ_DEC; - } - } - /* * Force the delivery of pending soft-disabled interrupts on PS3. * Any HV call will have this side effect. @@ -345,6 +299,7 @@ notrace void arch_local_irq_restore(unsigned long mask) if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) WARN_ON_ONCE(!(mfmsr() & MSR_EE)); __hard_irq_disable(); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; } else { /* * We should already be hard disabled here. 
We had bugs diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 74efe46f55327f..7d372ff3504b27 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -552,14 +552,11 @@ void timer_interrupt(struct pt_regs *regs) struct pt_regs *old_regs; u64 now; - /* Some implementations of hotplug will get timer interrupts while - * offline, just ignore these and we also need to set - * decrementers_next_tb as MAX to make sure __check_irq_replay - * don't replay timer interrupt when return, otherwise we'll trap - * here infinitely :( + /* + * Some implementations of hotplug will get timer interrupts while + * offline, just ignore these. */ if (unlikely(!cpu_online(smp_processor_id()))) { - *next_tb = ~(u64)0; set_dec(decrementer_max); return; } diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index d95954ad4c0af5..c61c3b62c8c628 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -731,7 +731,7 @@ int opal_hmi_exception_early2(struct pt_regs *regs) return 1; } -/* HMI exception handler called in virtual mode during check_irq_replay. */ +/* HMI exception handler called in virtual mode when irqs are next enabled. */ int opal_handle_hmi_exception(struct pt_regs *regs) { /* diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index f6b253e2be4097..36ec0bdd8b63c4 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev) /* IO map the message register block. */ of_address_to_resource(np, 0, &rsrc); - msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); + msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc)); if (!msgr_block_addr) { dev_err(&dev->dev, "Failed to iomap MPIC message registers"); return -EFAULT; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 6343dca0dbeb64..71203324ff42b0 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -406,6 +406,7 @@ ENTRY(system_call) mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC stg %r14,__PT_FLAGS(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) ENABLE_INTS .Lsysc_do_svc: # clear user controlled register to prevent speculative use @@ -422,7 +423,6 @@ ENTRY(system_call) jnl .Lsysc_nr_ok slag %r8,%r1,3 .Lsysc_nr_ok: - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r9,0(%r8,%r10) # get system call add. 
@@ -712,8 +712,8 @@ ENTRY(pgm_check_handler) mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -6: RESTORE_SM_CLEAR_PER - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) +6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + RESTORE_SM_CLEAR_PER larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f @@ -734,8 +734,8 @@ ENTRY(pgm_check_handler) # PER event in supervisor state, must be kprobes # .Lpgm_kprobe: - RESTORE_SM_CLEAR_PER xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + RESTORE_SM_CLEAR_PER lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_per_trap j .Lpgm_return @@ -777,10 +777,10 @@ ENTRY(io_int_handler) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ jo .Lio_restore TRACE_IRQS_OFF - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) .Lio_loop: lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,IO_INTERRUPT @@ -980,10 +980,10 @@ ENTRY(ext_int_handler) mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ jo .Lio_restore TRACE_IRQS_OFF - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,EXT_INTERRUPT brasl %r14,do_IRQ diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index ce115fce52f02a..e4b9b2ce9abf43 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,9 +19,8 @@ #include /* - * core module and version information + * core module information */ -#define RNG_VERSION "1.0.0" #define RNG_MODULE_NAME "hw_random" /* Changed at init time, in the non-modular case, and at module load @@ -28,88 +28,36 @@ * protects against a module being loaded twice at the same time. */ static int random_fd = -1; -static DECLARE_WAIT_QUEUE_HEAD(host_read_wait); +static struct hwrng hwrng = { 0, }; +static DECLARE_COMPLETION(have_data); -static int rng_dev_open (struct inode *inode, struct file *filp) +static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block) { - /* enforce read-only access to this chrdev */ - if ((filp->f_mode & FMODE_READ) == 0) - return -EINVAL; - if ((filp->f_mode & FMODE_WRITE) != 0) - return -EINVAL; + int ret; - return 0; -} - -static atomic_t host_sleep_count = ATOMIC_INIT(0); - -static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size, - loff_t *offp) -{ - u32 data; - int n, ret = 0, have_data; - - while (size) { - n = os_read_file(random_fd, &data, sizeof(data)); - if (n > 0) { - have_data = n; - while (have_data && size) { - if (put_user((u8) data, buf++)) { - ret = ret ? : -EFAULT; - break; - } - size--; - ret++; - have_data--; - data >>= 8; - } - } - else if (n == -EAGAIN) { - DECLARE_WAITQUEUE(wait, current); - - if (filp->f_flags & O_NONBLOCK) - return ret ? 
: -EAGAIN; - - atomic_inc(&host_sleep_count); + for (;;) { + ret = os_read_file(random_fd, buf, max); + if (block && ret == -EAGAIN) { add_sigio_fd(random_fd); - add_wait_queue(&host_read_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); + ret = wait_for_completion_killable(&have_data); - schedule(); - remove_wait_queue(&host_read_wait, &wait); + ignore_sigio_fd(random_fd); + deactivate_fd(random_fd, RANDOM_IRQ); - if (atomic_dec_and_test(&host_sleep_count)) { - ignore_sigio_fd(random_fd); - deactivate_fd(random_fd, RANDOM_IRQ); - } + if (ret < 0) + break; + } else { + break; } - else - return n; - - if (signal_pending (current)) - return ret ? : -ERESTARTSYS; } - return ret; -} -static const struct file_operations rng_chrdev_ops = { - .owner = THIS_MODULE, - .open = rng_dev_open, - .read = rng_dev_read, - .llseek = noop_llseek, -}; - -/* rng_init shouldn't be called more than once at boot time */ -static struct miscdevice rng_miscdev = { - HWRNG_MINOR, - RNG_MODULE_NAME, - &rng_chrdev_ops, -}; + return ret != -EAGAIN ? ret : 0; +} static irqreturn_t random_interrupt(int irq, void *data) { - wake_up(&host_read_wait); + complete(&have_data); return IRQ_HANDLED; } @@ -126,18 +74,19 @@ static int __init rng_init (void) goto out; random_fd = err; - err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt, 0, "random", NULL); if (err) goto err_out_cleanup_hw; sigio_broken(random_fd, 1); + hwrng.name = RNG_MODULE_NAME; + hwrng.read = rng_dev_read; + hwrng.quality = 1024; - err = misc_register (&rng_miscdev); + err = hwrng_register(&hwrng); if (err) { - printk (KERN_ERR RNG_MODULE_NAME ": misc device register " - "failed\n"); + pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err); goto err_out_cleanup_hw; } out: @@ -161,8 +110,8 @@ static void cleanup(void) static void __exit rng_cleanup(void) { + hwrng_unregister(&hwrng); os_close_file(random_fd); - misc_deregister (&rng_miscdev); } module_init (rng_init); diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index eae8c83364f718..b12c1b0d3e1d0b 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -47,18 +47,25 @@ /* Max request size is determined by sector mask - 32K */ #define UBD_MAX_REQUEST (8 * sizeof(long)) +struct io_desc { + char *buffer; + unsigned long length; + unsigned long sector_mask; + unsigned long long cow_offset; + unsigned long bitmap_words[2]; +}; + struct io_thread_req { struct request *req; int fds[2]; unsigned long offsets[2]; unsigned long long offset; - unsigned long length; - char *buffer; int sectorsize; - unsigned long sector_mask; - unsigned long long cow_offset; - unsigned long bitmap_words[2]; int error; + + int desc_cnt; + /* io_desc has to be the last element of the struct */ + struct io_desc io_desc[]; }; @@ -525,12 +532,7 @@ static void ubd_handler(void) blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q); } - if ((io_req->error) || (io_req->buffer == NULL)) - blk_mq_end_request(io_req->req, io_req->error); - else { - if (!blk_update_request(io_req->req, io_req->error, io_req->length)) - __blk_mq_end_request(io_req->req, io_req->error); - } + blk_mq_end_request(io_req->req, io_req->error); kfree(io_req); } } @@ -946,6 +948,7 @@ static int ubd_add(int n, char **error_out) blk_queue_write_cache(ubd_dev->queue, true, false); blk_queue_max_segments(ubd_dev->queue, MAX_SG); + blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1); err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, 
&ubd_gendisk[n]); if(err){ *error_out = "Failed to register device"; @@ -1289,37 +1292,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, *cow_offset += bitmap_offset; } -static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, +static void cowify_req(struct io_thread_req *req, struct io_desc *segment, + unsigned long offset, unsigned long *bitmap, __u64 bitmap_offset, __u64 bitmap_len) { - __u64 sector = req->offset >> SECTOR_SHIFT; + __u64 sector = offset >> SECTOR_SHIFT; int i; - if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT) + if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT) panic("Operation too long"); if (req_op(req->req) == REQ_OP_READ) { - for (i = 0; i < req->length >> SECTOR_SHIFT; i++) { + for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) { if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) ubd_set_bit(i, (unsigned char *) - &req->sector_mask); + &segment->sector_mask); + } + } else { + cowify_bitmap(offset, segment->length, &segment->sector_mask, + &segment->cow_offset, bitmap, bitmap_offset, + segment->bitmap_words, bitmap_len); + } +} + +static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, + struct request *req) +{ + struct bio_vec bvec; + struct req_iterator iter; + int i = 0; + unsigned long byte_offset = io_req->offset; + int op = req_op(req); + + if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) { + io_req->io_desc[0].buffer = NULL; + io_req->io_desc[0].length = blk_rq_bytes(req); + } else { + rq_for_each_segment(bvec, req, iter) { + BUG_ON(i >= io_req->desc_cnt); + + io_req->io_desc[i].buffer = + page_address(bvec.bv_page) + bvec.bv_offset; + io_req->io_desc[i].length = bvec.bv_len; + i++; + } + } + + if (dev->cow.file) { + for (i = 0; i < io_req->desc_cnt; i++) { + cowify_req(io_req, &io_req->io_desc[i], byte_offset, + dev->cow.bitmap, dev->cow.bitmap_offset, + dev->cow.bitmap_len); + byte_offset += io_req->io_desc[i].length; } + } - else cowify_bitmap(req->offset, req->length, &req->sector_mask, - &req->cow_offset, bitmap, bitmap_offset, - req->bitmap_words, bitmap_len); } -static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, - u64 off, struct bio_vec *bvec) +static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req, + int desc_cnt) { - struct ubd *dev = hctx->queue->queuedata; struct io_thread_req *io_req; - int ret; + int i; - io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC); + io_req = kmalloc(sizeof(*io_req) + + (desc_cnt * sizeof(struct io_desc)), + GFP_ATOMIC); if (!io_req) - return -ENOMEM; + return NULL; io_req->req = req; if (dev->cow.file) @@ -1327,26 +1367,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, else io_req->fds[0] = dev->fd; io_req->error = 0; - - if (bvec != NULL) { - io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset; - io_req->length = bvec->bv_len; - } else { - io_req->buffer = NULL; - io_req->length = blk_rq_bytes(req); - } - io_req->sectorsize = SECTOR_SIZE; io_req->fds[1] = dev->fd; - io_req->cow_offset = -1; - io_req->offset = off; - io_req->sector_mask = 0; + io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT; io_req->offsets[0] = 0; io_req->offsets[1] = dev->cow.data_offset; - if (dev->cow.file) - cowify_req(io_req, dev->cow.bitmap, - dev->cow.bitmap_offset, dev->cow.bitmap_len); + for (i = 0 ; i < desc_cnt; i++) { + io_req->io_desc[i].sector_mask = 0; + io_req->io_desc[i].cow_offset = -1; + } + + return 
io_req; +} + +static int ubd_submit_request(struct ubd *dev, struct request *req) +{ + int segs = 0; + struct io_thread_req *io_req; + int ret; + int op = req_op(req); + + if (op == REQ_OP_FLUSH) + segs = 0; + else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) + segs = 1; + else + segs = blk_rq_nr_phys_segments(req); + + io_req = ubd_alloc_req(dev, req, segs); + if (!io_req) + return -ENOMEM; + + io_req->desc_cnt = segs; + if (segs) + ubd_map_req(dev, io_req, req); ret = os_write_file(thread_fd, &io_req, sizeof(io_req)); if (ret != sizeof(io_req)) { @@ -1357,22 +1412,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, return ret; } -static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req) -{ - struct req_iterator iter; - struct bio_vec bvec; - int ret; - u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT; - - rq_for_each_segment(bvec, req, iter) { - ret = ubd_queue_one_vec(hctx, req, off, &bvec); - if (ret < 0) - return ret; - off += bvec.bv_len; - } - return 0; -} - static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -1385,17 +1424,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, spin_lock_irq(&ubd_dev->lock); switch (req_op(req)) { - /* operations with no lentgth/offset arguments */ case REQ_OP_FLUSH: - ret = ubd_queue_one_vec(hctx, req, 0, NULL); - break; case REQ_OP_READ: case REQ_OP_WRITE: - ret = queue_rw_req(hctx, req); - break; case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: - ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL); + ret = ubd_submit_request(ubd_dev, req); break; default: WARN_ON_ONCE(1); @@ -1483,22 +1517,22 @@ static int map_error(int error_code) * will result in unpredictable behaviour and/or crashes. 
*/ -static int update_bitmap(struct io_thread_req *req) +static int update_bitmap(struct io_thread_req *req, struct io_desc *segment) { int n; - if(req->cow_offset == -1) + if (segment->cow_offset == -1) return map_error(0); - n = os_pwrite_file(req->fds[1], &req->bitmap_words, - sizeof(req->bitmap_words), req->cow_offset); - if (n != sizeof(req->bitmap_words)) + n = os_pwrite_file(req->fds[1], &segment->bitmap_words, + sizeof(segment->bitmap_words), segment->cow_offset); + if (n != sizeof(segment->bitmap_words)) return map_error(-n); return map_error(0); } -static void do_io(struct io_thread_req *req) +static void do_io(struct io_thread_req *req, struct io_desc *desc) { char *buf = NULL; unsigned long len; @@ -1513,21 +1547,20 @@ static void do_io(struct io_thread_req *req) return; } - nsectors = req->length / req->sectorsize; + nsectors = desc->length / req->sectorsize; start = 0; do { - bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); + bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask); end = start; while((end < nsectors) && - (ubd_test_bit(end, (unsigned char *) - &req->sector_mask) == bit)) + (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit)) end++; off = req->offset + req->offsets[bit] + start * req->sectorsize; len = (end - start) * req->sectorsize; - if (req->buffer != NULL) - buf = &req->buffer[start * req->sectorsize]; + if (desc->buffer != NULL) + buf = &desc->buffer[start * req->sectorsize]; switch (req_op(req->req)) { case REQ_OP_READ: @@ -1567,7 +1600,8 @@ static void do_io(struct io_thread_req *req) start = end; } while(start < nsectors); - req->error = update_bitmap(req); + req->offset += len; + req->error = update_bitmap(req, desc); } /* Changed in start_io_thread, which is serialized by being called only @@ -1600,8 +1634,13 @@ int io_thread(void *arg) } for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + struct io_thread_req *req = (*io_req_buffer)[count]; + int i; + io_count++; - do_io((*io_req_buffer)[count]); + for (i = 0; !req->error && i < req->desc_cnt; i++) + do_io(req, &(req->io_desc[i])); + } written = 0; diff --git a/block/blk-pm.c b/block/blk-pm.c index b85234d758f7b2..17bd020268d421 100644 --- a/block/blk-pm.c +++ b/block/blk-pm.c @@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q) WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); + spin_lock_irq(&q->queue_lock); + q->rpm_status = RPM_SUSPENDING; + spin_unlock_irq(&q->queue_lock); + /* * Increase the pm_only counter before checking whether any * non-PM blk_queue_enter() calls are in progress to avoid that any @@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q) /* Switch q_usage_counter back to per-cpu mode. 
*/ blk_mq_unfreeze_queue(q); - spin_lock_irq(&q->queue_lock); - if (ret < 0) + if (ret < 0) { + spin_lock_irq(&q->queue_lock); + q->rpm_status = RPM_ACTIVE; pm_runtime_mark_last_busy(q->dev); - else - q->rpm_status = RPM_SUSPENDING; - spin_unlock_irq(&q->queue_lock); + spin_unlock_irq(&q->queue_lock); - if (ret) blk_clear_pm_only(q); + } return ret; } diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 78d635f1d15675..376164cdf2ea9b 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -251,8 +251,12 @@ static int h5_close(struct hci_uart *hu) if (h5->vnd && h5->vnd->close) h5->vnd->close(h5); - if (!hu->serdev) - kfree(h5); + if (hu->serdev) + serdev_device_close(hu->serdev); + + kfree_skb(h5->rx_skb); + kfree(h5); + h5 = NULL; return 0; } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index e92c4d9469d822..5952210526aaac 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -540,15 +540,15 @@ endif # HW_RANDOM config UML_RANDOM depends on UML - tristate "Hardware random number generator" + select HW_RANDOM + tristate "UML Random Number Generator support" help This option enables UML's "hardware" random number generator. It attaches itself to the host's /dev/random, supplying as much entropy as the host has, rather than the small amount the UML gets from its - own drivers. It registers itself as a standard hardware random number - generator, major 10, minor 183, and the canonical device name is - /dev/hwrng. - The way to make use of this is to install the rng-tools package - (check your distro, or download from - http://sourceforge.net/projects/gkernel/). rngd periodically reads - /dev/hwrng and injects the entropy into /dev/random. + own drivers. It registers itself as a rng-core driver thus providing + a device which is usually called /dev/hwrng. This hardware random + number generator does feed into the kernel's random number generator + entropy pool. + + If unsure, say Y. 
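Aside on the hwrng conversion above: a minimal sketch of the rng-core registration pattern that arch/um/drivers/random.c now follows after this patch. The names example_rng_read and example_hwrng are illustrative stand-ins, not identifiers from the patch.

#include <linux/hw_random.h>
#include <linux/module.h>

/* Illustrative sketch only: the read callback returns how many bytes it
 * placed in buf (or a negative errno); rng-core then exposes /dev/hwrng
 * and feeds the kernel entropy pool, as described in the Kconfig help.
 */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	return 0;	/* stub: no data available */
}

static struct hwrng example_hwrng = {
	.name    = "example-hwrng",
	.read    = example_rng_read,
	.quality = 1024,	/* claimed entropy, in bits per 1024 bits of input */
};

static int __init example_rng_init(void)
{
	return hwrng_register(&example_hwrng);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_hwrng);
}

module_init(example_rng_init);
module_exit(example_rng_exit);
MODULE_LICENSE("GPL");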
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 27513d311242e8..de7b74505e75e7 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax) } EXPORT_SYMBOL_GPL(kill_dev_dax); -static void free_dev_dax_ranges(struct dev_dax *dev_dax) +static void trim_dev_dax_range(struct dev_dax *dev_dax) { + int i = dev_dax->nr_range - 1; + struct range *range = &dev_dax->ranges[i].range; struct dax_region *dax_region = dev_dax->region; - int i; device_lock_assert(dax_region->dev); - for (i = 0; i < dev_dax->nr_range; i++) { - struct range *range = &dev_dax->ranges[i].range; - - __release_region(&dax_region->res, range->start, - range_len(range)); + dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i, + (unsigned long long)range->start, + (unsigned long long)range->end); + + __release_region(&dax_region->res, range->start, range_len(range)); + if (--dev_dax->nr_range == 0) { + kfree(dev_dax->ranges); + dev_dax->ranges = NULL; } - dev_dax->nr_range = 0; +} + +static void free_dev_dax_ranges(struct dev_dax *dev_dax) +{ + while (dev_dax->nr_range) + trim_dev_dax_range(dev_dax); } static void unregister_dev_dax(void *dev) @@ -804,15 +813,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start, return 0; rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1); - if (rc) { - dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1, - &alloc->start, &alloc->end); - dev_dax->nr_range--; - __release_region(res, alloc->start, resource_size(alloc)); - return rc; - } + if (rc) + trim_dev_dax_range(dev_dax); - return 0; + return rc; } static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size) @@ -885,12 +889,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size) if (shrink >= range_len(range)) { devm_release_action(dax_region->dev, unregister_dax_mapping, &mapping->dev); - __release_region(&dax_region->res, range->start, - range_len(range)); - dev_dax->nr_range--; - dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i, - (unsigned long long) range->start, - (unsigned long long) range->end); + trim_dev_dax_range(dev_dax); to_shrink -= shrink; if (!to_shrink) break; @@ -1274,7 +1273,6 @@ static void dev_dax_release(struct device *dev) put_dax(dax_dev); free_dev_dax_id(dev_dax); dax_region_put(dax_region); - kfree(dev_dax->ranges); kfree(dev_dax->pgmap); kfree(dev_dax); } diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 835c88318cec63..98ed73ada23a8d 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_TEGRA_HOST1X) += host1x/ obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ obj-$(CONFIG_TRACE_GPU_MEM) += trace/ +obj-y += arm/ diff --git a/drivers/gpu/arm/Kbuild b/drivers/gpu/arm/Kbuild new file mode 100644 index 00000000000000..19c7e9a2659d23 --- /dev/null +++ b/drivers/gpu/arm/Kbuild @@ -0,0 +1,17 @@ +# +# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# + + + +obj-$(CONFIG_MALI_MIDGARD) += midgard/ diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig new file mode 100644 index 00000000000000..1f30eb541d6525 --- /dev/null +++ b/drivers/gpu/arm/Kconfig @@ -0,0 +1,19 @@ +# +# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +menu "ARM GPU Configuration" +source "drivers/gpu/arm/midgard/Kconfig" +endmenu diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild new file mode 100644 index 00000000000000..4edc5da6e63e17 --- /dev/null +++ b/drivers/gpu/arm/midgard/Kbuild @@ -0,0 +1,172 @@ +# +# (C) COPYRIGHT 2012-2016, 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +# Driver version string which is returned to userspace via an ioctl +MALI_RELEASE_NAME ?= "r21p0-01rel0" + +# Paths required for build +KBASE_PATH = $(src) +KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy +UMP_PATH = $(src)/../../../base + +ifeq ($(CONFIG_MALI_ERROR_INJECT),y) +MALI_ERROR_INJECT_ON = 1 +endif + +# Set up defaults if not defined by build system +MALI_CUSTOMER_RELEASE ?= 1 +MALI_UNIT_TEST ?= 0 +MALI_KERNEL_TEST_API ?= 0 +MALI_ERROR_INJECT_ON ?= 0 +MALI_MOCK_TEST ?= 0 +MALI_COVERAGE ?= 0 +CONFIG_MALI_PLATFORM_NAME ?= "devicetree" +# This workaround is for what seems to be a compiler bug we observed in +# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling +# the "_Pragma" syntax, where an error message is returned: +# +# "internal compiler error: unspellable token PRAGMA" +# +# This regression has thus far only been seen on the GCC 4.7 compiler bundled +# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds +# which are not known to be used with AOSP, is hardcoded to disable the +# workaround, i.e. set the define to 0. 
+MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0 + +# Set up our defines, which will be passed to gcc +DEFINES = \ + -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \ + -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \ + -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \ + -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \ + -DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \ + -DMALI_COVERAGE=$(MALI_COVERAGE) \ + -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \ + -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598) + +ifeq ($(KBUILD_EXTMOD),) +# in-tree +DEFINES +=-DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME) +else +# out-of-tree +DEFINES +=-DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME) +endif + +DEFINES += -I$(srctree)/drivers/staging/android + +# Use our defines when compiling +ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux +subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux + +SRC := \ + mali_kbase_device.c \ + mali_kbase_cache_policy.c \ + mali_kbase_mem.c \ + mali_kbase_mmu.c \ + mali_kbase_ctx_sched.c \ + mali_kbase_jd.c \ + mali_kbase_jd_debugfs.c \ + mali_kbase_jm.c \ + mali_kbase_gpuprops.c \ + mali_kbase_js.c \ + mali_kbase_js_ctx_attr.c \ + mali_kbase_event.c \ + mali_kbase_context.c \ + mali_kbase_pm.c \ + mali_kbase_config.c \ + mali_kbase_vinstr.c \ + mali_kbase_softjobs.c \ + mali_kbase_10969_workaround.c \ + mali_kbase_hw.c \ + mali_kbase_utility.c \ + mali_kbase_debug.c \ + mali_kbase_trace_timeline.c \ + mali_kbase_gpu_memory_debugfs.c \ + mali_kbase_mem_linux.c \ + mali_kbase_core_linux.c \ + mali_kbase_replay.c \ + mali_kbase_mem_profile_debugfs.c \ + mali_kbase_mmu_mode_lpae.c \ + mali_kbase_mmu_mode_aarch64.c \ + mali_kbase_disjoint_events.c \ + mali_kbase_gator_api.c \ + mali_kbase_debug_mem_view.c \ + mali_kbase_debug_job_fault.c \ + mali_kbase_smc.c \ + mali_kbase_mem_pool.c \ + mali_kbase_mem_pool_debugfs.c \ + mali_kbase_tlstream.c \ + mali_kbase_strings.c \ + mali_kbase_as_fault_debugfs.c \ + mali_kbase_regs_history_debugfs.c + + + + +ifeq ($(MALI_UNIT_TEST),1) + SRC += mali_kbase_tlstream_test.c +endif + +ifeq ($(MALI_CUSTOMER_RELEASE),0) + SRC += mali_kbase_regs_dump_debugfs.c +endif + + +ccflags-y += -I$(KBASE_PATH) + +# Tell the Linux build system from which .o file to create the kernel module +obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o + +# Tell the Linux build system to enable building of our .c files +mali_kbase-y := $(SRC:.c=.o) + +# Kconfig passes in the name with quotes for in-tree builds - remove them. 
+platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_NAME)) +MALI_PLATFORM_DIR := platform/$(platform_name) +ccflags-y += -I$(src)/$(MALI_PLATFORM_DIR) +include $(src)/$(MALI_PLATFORM_DIR)/Kbuild + +ifeq ($(CONFIG_MALI_DEVFREQ),y) + ifeq ($(CONFIG_DEVFREQ_THERMAL),y) + include $(src)/ipa/Kbuild + endif +endif + +mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \ + mali_kbase_dma_fence.o \ + mali_kbase_fence.o +mali_kbase-$(CONFIG_SYNC) += \ + mali_kbase_sync_android.o \ + mali_kbase_sync_common.o +mali_kbase-$(CONFIG_SYNC_FILE) += \ + mali_kbase_sync_file.o \ + mali_kbase_sync_common.o \ + mali_kbase_fence.o + +ifeq ($(MALI_MOCK_TEST),1) +# Test functionality +mali_kbase-y += tests/internal/src/mock/mali_kbase_pm_driver_mock.o +endif + +include $(src)/backend/gpu/Kbuild +mali_kbase-y += $(BACKEND:.c=.o) + + +ccflags-y += -I$(src)/backend/gpu +subdir-ccflags-y += -I$(src)/backend/gpu + +# For kutf and mali_kutf_irq_latency_test +obj-$(CONFIG_MALI_KUTF) += tests/ diff --git a/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/midgard/Kconfig new file mode 100644 index 00000000000000..bcc2d4a3f65eb4 --- /dev/null +++ b/drivers/gpu/arm/midgard/Kconfig @@ -0,0 +1,201 @@ +# +# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +menuconfig MALI_MIDGARD + tristate "Mali Midgard series support" + select GPU_TRACEPOINTS if ANDROID + default n + help + Enable this option to build support for a ARM Mali Midgard GPU. + + To compile this driver as a module, choose M here: + this will generate a single module, called mali_kbase. + +config MALI_GATOR_SUPPORT + bool "Streamline support via Gator" + depends on MALI_MIDGARD + default n + help + Adds diagnostic support for use with the ARM Streamline Performance Analyzer. + You will need the Gator device driver already loaded before loading this driver when enabling + Streamline debug support. + This is a legacy interface required by older versions of Streamline. + +config MALI_MIDGARD_DVFS + bool "Enable legacy DVFS" + depends on MALI_MIDGARD && !MALI_DEVFREQ + default n + help + Choose this option to enable legacy DVFS in the Mali Midgard DDK. + +config MALI_MIDGARD_ENABLE_TRACE + bool "Enable kbase tracing" + depends on MALI_MIDGARD + default n + help + Enables tracing in kbase. Trace log available through + the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled + +config MALI_DEVFREQ + bool "devfreq support for Mali" + depends on MALI_MIDGARD && PM_DEVFREQ + help + Support devfreq for Mali. + + Using the devfreq framework and, by default, the simpleondemand + governor, the frequency of Mali will be dynamically selected from the + available OPPs. + +config MALI_DMA_FENCE + bool "DMA_BUF fence support for Mali" + depends on MALI_MIDGARD + default n + help + Support DMA_BUF fences for Mali. + + This option should only be enabled if the Linux Kernel has built in + support for DMA_BUF fences. + +config MALI_PLATFORM_NAME + depends on MALI_MIDGARD + string "Platform name" + default "devicetree" + help + Enter the name of the desired platform configuration directory to + include in the build. 
'platform/$(MALI_PLATFORM_NAME)/Kbuild' must + exist. + +# MALI_EXPERT configuration options + +menuconfig MALI_EXPERT + depends on MALI_MIDGARD + bool "Enable Expert Settings" + default n + help + Enabling this option and modifying the default settings may produce a driver with performance or + other limitations. + +config MALI_CORESTACK + bool "Support controlling power to the GPU core stack" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Enabling this feature on supported GPUs will let the driver powering + on/off the GPU core stack independently without involving the Power + Domain Controller. This should only be enabled on platforms which + integration of the PDC to the Mali GPU is known to be problematic. + This feature is currently only supported on t-Six and t-HEx GPUs. + + If unsure, say N. + +config MALI_PRFCNT_SET_SECONDARY + bool "Use secondary set of performance counters" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Select this option to use secondary set of performance counters. Kernel + features that depend on an access to the primary set of counters may + become unavailable. Enabling this option will prevent power management + from working optimally and may cause instrumentation tools to return + bogus results. + + If unsure, say N. + +config MALI_DEBUG + bool "Debug build" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Select this option for increased checking and reporting of errors. + +config MALI_FENCE_DEBUG + bool "Debug sync fence usage" + depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE) + default y if MALI_DEBUG + help + Select this option to enable additional checking and reporting on the + use of sync fences in the Mali driver. + + This will add a 3s timeout to all sync fence waits in the Mali + driver, so that when work for Mali has been waiting on a sync fence + for a long time a debug message will be printed, detailing what fence + is causing the block, and which dependent Mali atoms are blocked as a + result of this. + + The timeout can be changed at runtime through the js_soft_timeout + device attribute, where the timeout is specified in milliseconds. + +config MALI_NO_MALI + bool "No Mali" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + This can be used to test the driver in a simulated environment + whereby the hardware is not physically present. If the hardware is physically + present it will not be used. This can be used to test the majority of the + driver without needing actual hardware or for software benchmarking. + All calls to the simulated hardware will complete immediately as if the hardware + completed the task. + +config MALI_ERROR_INJECT + bool "Error injection" + depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI + default n + help + Enables insertion of errors to test module failure and recovery mechanisms. + +config MALI_TRACE_TIMELINE + bool "Timeline tracing" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Enables timeline tracing through the kernel tracepoint system. + +config MALI_SYSTEM_TRACE + bool "Enable system event tracing support" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Choose this option to enable system trace events for each + kbase event. This is typically used for debugging but has + minimal overhead when not in use. Enable only if you know what + you are doing. 
+ +config MALI_2MB_ALLOC + bool "Attempt to allocate 2MB pages" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + Rather than allocating all GPU memory page-by-page, attempt to + allocate 2MB pages from the kernel. This reduces TLB pressure and + helps to prevent memory fragmentation. + + If in doubt, say N + +config MALI_PWRSOFT_765 + bool "PWRSOFT-765 ticket" + depends on MALI_MIDGARD && MALI_EXPERT + default n + help + PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged + in kernel v4.10, however if backported into the kernel then this + option must be manually selected. + + If using kernel >= v4.10 then say N, otherwise if devfreq cooling + changes have been backported say Y to avoid compilation errors. + +source "drivers/gpu/arm/midgard/platform/Kconfig" +source "drivers/gpu/arm/midgard/tests/Kconfig" diff --git a/drivers/gpu/arm/midgard/Makefile b/drivers/gpu/arm/midgard/Makefile new file mode 100644 index 00000000000000..b7b261a0ae5001 --- /dev/null +++ b/drivers/gpu/arm/midgard/Makefile @@ -0,0 +1,38 @@ +# +# (C) COPYRIGHT 2010-2016, 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +KDIR ?= /lib/modules/$(shell uname -r)/build + +BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../.. +UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump +KBASE_PATH_RELATIVE = $(CURDIR) +EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers + +ifeq ($(MALI_UNIT_TEST), 1) + EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers +endif + +ifeq ($(CONFIG_MALI_FPGA_BUS_LOGGER),y) +#Add bus logger symbols +EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers +endif + +# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions +all: + $(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules + +clean: + $(MAKE) -C $(KDIR) M=$(CURDIR) clean diff --git a/drivers/gpu/arm/midgard/Makefile.kbase b/drivers/gpu/arm/midgard/Makefile.kbase new file mode 100644 index 00000000000000..2bef9c25eaeb89 --- /dev/null +++ b/drivers/gpu/arm/midgard/Makefile.kbase @@ -0,0 +1,17 @@ +# +# (C) COPYRIGHT 2010 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# + + +EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(OSK_PATH)/src/linux/include -I$(KBASE_PATH)/platform_$(PLATFORM) + diff --git a/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/drivers/gpu/arm/midgard/backend/gpu/Kbuild new file mode 100644 index 00000000000000..5f700e9b6b44fb --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/Kbuild @@ -0,0 +1,60 @@ +# +# (C) COPYRIGHT 2014,2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +BACKEND += \ + backend/gpu/mali_kbase_cache_policy_backend.c \ + backend/gpu/mali_kbase_device_hw.c \ + backend/gpu/mali_kbase_gpu.c \ + backend/gpu/mali_kbase_gpuprops_backend.c \ + backend/gpu/mali_kbase_debug_job_fault_backend.c \ + backend/gpu/mali_kbase_irq_linux.c \ + backend/gpu/mali_kbase_instr_backend.c \ + backend/gpu/mali_kbase_jm_as.c \ + backend/gpu/mali_kbase_jm_hw.c \ + backend/gpu/mali_kbase_jm_rb.c \ + backend/gpu/mali_kbase_js_affinity.c \ + backend/gpu/mali_kbase_js_backend.c \ + backend/gpu/mali_kbase_mmu_hw_direct.c \ + backend/gpu/mali_kbase_pm_backend.c \ + backend/gpu/mali_kbase_pm_driver.c \ + backend/gpu/mali_kbase_pm_metrics.c \ + backend/gpu/mali_kbase_pm_ca.c \ + backend/gpu/mali_kbase_pm_ca_fixed.c \ + backend/gpu/mali_kbase_pm_always_on.c \ + backend/gpu/mali_kbase_pm_coarse_demand.c \ + backend/gpu/mali_kbase_pm_demand.c \ + backend/gpu/mali_kbase_pm_policy.c \ + backend/gpu/mali_kbase_time.c + +ifeq ($(MALI_CUSTOMER_RELEASE),0) +BACKEND += \ + backend/gpu/mali_kbase_pm_ca_random.c \ + backend/gpu/mali_kbase_pm_demand_always_powered.c \ + backend/gpu/mali_kbase_pm_fast_start.c +endif + +ifeq ($(CONFIG_MALI_DEVFREQ),y) +BACKEND += \ + backend/gpu/mali_kbase_devfreq.c \ + backend/gpu/mali_kbase_pm_ca_devfreq.c +endif + +ifeq ($(CONFIG_MALI_NO_MALI),y) + # Dummy model + BACKEND += backend/gpu/mali_kbase_model_dummy.c + BACKEND += backend/gpu/mali_kbase_model_linux.c + # HW error simulation + BACKEND += backend/gpu/mali_kbase_model_error_generator.c +endif diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h new file mode 100644 index 00000000000000..c8ae87eb84a283 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h @@ -0,0 +1,29 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* + * Backend specific configuration + */ + +#ifndef _KBASE_BACKEND_CONFIG_H_ +#define _KBASE_BACKEND_CONFIG_H_ + +/* Enable GPU reset API */ +#define KBASE_GPU_RESET_EN 1 + +#endif /* _KBASE_BACKEND_CONFIG_H_ */ + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c new file mode 100644 index 00000000000000..fef9a2cb743ebd --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c @@ -0,0 +1,29 @@ +/* + * + * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include "backend/gpu/mali_kbase_cache_policy_backend.h" +#include + +void kbase_cache_set_coherency_mode(struct kbase_device *kbdev, + u32 mode) +{ + kbdev->current_gpu_coherency_mode = mode; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) + kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL); +} + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h new file mode 100644 index 00000000000000..fe9869109a8257 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h @@ -0,0 +1,34 @@ +/* + * + * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +#ifndef _KBASE_CACHE_POLICY_BACKEND_H_ +#define _KBASE_CACHE_POLICY_BACKEND_H_ + +#include "mali_kbase.h" +#include "mali_base_kernel.h" + +/** + * kbase_cache_set_coherency_mode() - Sets the system coherency mode + * in the GPU. + * @kbdev: Device pointer + * @mode: Coherency mode. COHERENCY_ACE/ACE_LITE + */ +void kbase_cache_set_coherency_mode(struct kbase_device *kbdev, + u32 mode); + +#endif /* _KBASE_CACHE_POLICY_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c new file mode 100644 index 00000000000000..7851ea6466c78e --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c @@ -0,0 +1,157 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include +#include +#include "mali_kbase_debug_job_fault.h" + +#ifdef CONFIG_DEBUG_FS + +/*GPU_CONTROL_REG(r)*/ +static int gpu_control_reg_snapshot[] = { + GPU_ID, + SHADER_READY_LO, + SHADER_READY_HI, + TILER_READY_LO, + TILER_READY_HI, + L2_READY_LO, + L2_READY_HI +}; + +/* JOB_CONTROL_REG(r) */ +static int job_control_reg_snapshot[] = { + JOB_IRQ_MASK, + JOB_IRQ_STATUS +}; + +/* JOB_SLOT_REG(n,r) */ +static int job_slot_reg_snapshot[] = { + JS_HEAD_LO, + JS_HEAD_HI, + JS_TAIL_LO, + JS_TAIL_HI, + JS_AFFINITY_LO, + JS_AFFINITY_HI, + JS_CONFIG, + JS_STATUS, + JS_HEAD_NEXT_LO, + JS_HEAD_NEXT_HI, + JS_AFFINITY_NEXT_LO, + JS_AFFINITY_NEXT_HI, + JS_CONFIG_NEXT +}; + +/*MMU_REG(r)*/ +static int mmu_reg_snapshot[] = { + MMU_IRQ_MASK, + MMU_IRQ_STATUS +}; + +/* MMU_AS_REG(n,r) */ +static int as_reg_snapshot[] = { + AS_TRANSTAB_LO, + AS_TRANSTAB_HI, + AS_MEMATTR_LO, + AS_MEMATTR_HI, + AS_FAULTSTATUS, + AS_FAULTADDRESS_LO, + AS_FAULTADDRESS_HI, + AS_STATUS +}; + +bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx, + int reg_range) +{ + int i, j; + int offset = 0; + int slot_number; + int as_number; + + if (kctx->reg_dump == NULL) + return false; + + slot_number = kctx->kbdev->gpu_props.num_job_slots; + as_number = kctx->kbdev->gpu_props.num_address_spaces; + + /* get the GPU control registers*/ + for (i = 0; i < sizeof(gpu_control_reg_snapshot)/4; i++) { + kctx->reg_dump[offset] = + GPU_CONTROL_REG(gpu_control_reg_snapshot[i]); + offset += 2; + } + + /* get the Job control registers*/ + for (i = 0; i < sizeof(job_control_reg_snapshot)/4; i++) { + kctx->reg_dump[offset] = + JOB_CONTROL_REG(job_control_reg_snapshot[i]); + offset += 2; + } + + /* get the Job Slot registers*/ + for (j = 0; j < slot_number; j++) { + for (i = 0; i < sizeof(job_slot_reg_snapshot)/4; i++) { + kctx->reg_dump[offset] = + JOB_SLOT_REG(j, job_slot_reg_snapshot[i]); + offset += 2; + } + } + + /* get the MMU registers*/ + for (i = 0; i < sizeof(mmu_reg_snapshot)/4; i++) { + kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]); + offset += 2; + } + + /* get the Address space registers*/ + for (j = 0; j < as_number; j++) { + for (i = 0; i < sizeof(as_reg_snapshot)/4; i++) { + kctx->reg_dump[offset] = + MMU_AS_REG(j, as_reg_snapshot[i]); + offset += 2; + } + } + + WARN_ON(offset >= (reg_range*2/4)); + + /* set the termination flag*/ + kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG; + kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG; + + dev_dbg(kctx->kbdev->dev, "kbase_job_fault_reg_snapshot_init:%d\n", + offset); + + return true; +} + +bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx) +{ + int offset = 0; + + if (kctx->reg_dump == NULL) + return false; + + while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) { + kctx->reg_dump[offset+1] = + kbase_reg_read(kctx->kbdev, + kctx->reg_dump[offset], NULL); + offset += 2; + } + return true; +} + + +#endif diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c new file mode 100644 index 00000000000000..1bba1b71df63de --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c @@ -0,0 +1,406 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include + +#include +#include +#include +#ifdef CONFIG_DEVFREQ_THERMAL +#include +#endif + +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) +#include +#else /* Linux >= 3.13 */ +/* In 3.13 the OPP include header file, types, and functions were all + * renamed. Use the old filename for the include, and define the new names to + * the old, when an old kernel is detected. + */ +#include +#define dev_pm_opp opp +#define dev_pm_opp_get_voltage opp_get_voltage +#define dev_pm_opp_get_opp_count opp_get_opp_count +#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil +#define dev_pm_opp_find_freq_floor opp_find_freq_floor +#endif /* Linux >= 3.13 */ + +/** + * opp_translate - Translate nominal OPP frequency from devicetree into real + * frequency and core mask + * @kbdev: Device pointer + * @freq: Nominal frequency + * @core_mask: Pointer to u64 to store core mask to + * + * Return: Real target frequency + * + * This function will only perform translation if an operating-points-v2-mali + * table is present in devicetree. If one is not present then it will return an + * untranslated frequency and all cores enabled. + */ +static unsigned long opp_translate(struct kbase_device *kbdev, + unsigned long freq, u64 *core_mask) +{ + int i; + + for (i = 0; i < kbdev->num_opps; i++) { + if (kbdev->opp_table[i].opp_freq == freq) { + *core_mask = kbdev->opp_table[i].core_mask; + return kbdev->opp_table[i].real_freq; + } + } + + /* Failed to find OPP - return all cores enabled & nominal frequency */ + *core_mask = kbdev->gpu_props.props.raw_props.shader_present; + + return freq; +} + +static int +kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags) +{ + struct kbase_device *kbdev = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long nominal_freq; + unsigned long freq = 0; + unsigned long voltage; + int err; + u64 core_mask; + + freq = *target_freq; + + opp = devfreq_recommended_opp(dev, &freq, flags); + voltage = dev_pm_opp_get_voltage(opp); + if (IS_ERR_OR_NULL(opp)) { + dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp)); + return PTR_ERR(opp); + } + + nominal_freq = freq; + + /* + * Only update if there is a change of frequency + */ + if (kbdev->current_nominal_freq == nominal_freq) { + *target_freq = nominal_freq; + return 0; + } + + freq = opp_translate(kbdev, nominal_freq, &core_mask); +#ifdef CONFIG_REGULATOR + if (kbdev->regulator && kbdev->current_voltage != voltage + && kbdev->current_freq < freq) { + err = regulator_set_voltage(kbdev->regulator, voltage, voltage); + if (err) { + dev_err(dev, "Failed to increase voltage (%d)\n", err); + return err; + } + } +#endif + + err = clk_set_rate(kbdev->clock, freq); + if (err) { + dev_err(dev, "Failed to set clock %lu (target %lu)\n", + freq, *target_freq); + return err; + } + +#ifdef CONFIG_REGULATOR + if (kbdev->regulator && kbdev->current_voltage != voltage + && kbdev->current_freq > freq) { + err = regulator_set_voltage(kbdev->regulator, voltage, voltage); + if (err) { + dev_err(dev, "Failed to decrease voltage 
(%d)\n", err); + return err; + } + } +#endif + + if (kbdev->pm.backend.ca_current_policy->id == + KBASE_PM_CA_POLICY_ID_DEVFREQ) + kbase_devfreq_set_core_mask(kbdev, core_mask); + + *target_freq = nominal_freq; + kbdev->current_voltage = voltage; + kbdev->current_nominal_freq = nominal_freq; + kbdev->current_freq = freq; + kbdev->current_core_mask = core_mask; + + KBASE_TLSTREAM_AUX_DEVFREQ_TARGET((u64)nominal_freq); + + kbase_pm_reset_dvfs_utilisation(kbdev); + + return err; +} + +static int +kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq) +{ + struct kbase_device *kbdev = dev_get_drvdata(dev); + + *freq = kbdev->current_nominal_freq; + + return 0; +} + +static int +kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat) +{ + struct kbase_device *kbdev = dev_get_drvdata(dev); + + stat->current_frequency = kbdev->current_nominal_freq; + + kbase_pm_get_dvfs_utilisation(kbdev, + &stat->total_time, &stat->busy_time); + + stat->private_data = NULL; + + return 0; +} + +static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev, + struct devfreq_dev_profile *dp) +{ + int count; + int i = 0; + unsigned long freq; + struct dev_pm_opp *opp; + + count = dev_pm_opp_get_opp_count(kbdev->dev); + if (count < 0) { + return count; + } + + dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]), + GFP_KERNEL); + if (!dp->freq_table) + return -ENOMEM; + + for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) { + opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq); + if (IS_ERR(opp)) + break; + + dp->freq_table[i] = freq; + } + + if (count != i) + dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d\n", + count, i); + + dp->max_state = i; + + return 0; +} + +static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev) +{ + struct devfreq_dev_profile *dp = kbdev->devfreq->profile; + + kfree(dp->freq_table); +} + +static void kbase_devfreq_exit(struct device *dev) +{ + struct kbase_device *kbdev = dev_get_drvdata(dev); + + kbase_devfreq_term_freq_table(kbdev); +} + +static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev) +{ + struct device_node *opp_node = of_parse_phandle(kbdev->dev->of_node, + "operating-points-v2", 0); + struct device_node *node; + int i = 0; + int count; + + if (!opp_node) + return 0; + if (!of_device_is_compatible(opp_node, "operating-points-v2-mali")) + return 0; + + count = dev_pm_opp_get_opp_count(kbdev->dev); + kbdev->opp_table = kmalloc_array(count, + sizeof(struct kbase_devfreq_opp), GFP_KERNEL); + if (!kbdev->opp_table) + return -ENOMEM; + + for_each_available_child_of_node(opp_node, node) { + u64 core_mask; + u64 opp_freq, real_freq; + const void *core_count_p; + + if (of_property_read_u64(node, "opp-hz", &opp_freq)) { + dev_warn(kbdev->dev, "OPP is missing required opp-hz property\n"); + continue; + } + if (of_property_read_u64(node, "opp-hz-real", &real_freq)) + real_freq = opp_freq; + if (of_property_read_u64(node, "opp-core-mask", &core_mask)) + core_mask = + kbdev->gpu_props.props.raw_props.shader_present; + core_count_p = of_get_property(node, "opp-core-count", NULL); + if (core_count_p) { + u64 remaining_core_mask = + kbdev->gpu_props.props.raw_props.shader_present; + int core_count = be32_to_cpup(core_count_p); + + core_mask = 0; + + for (; core_count > 0; core_count--) { + int core = ffs(remaining_core_mask); + + if (!core) { + dev_err(kbdev->dev, "OPP has more cores than GPU\n"); + return -ENODEV; + } + + core_mask |= (1ull << (core-1)); + remaining_core_mask &= ~(1ull << (core-1)); + } + 
} + + if (!core_mask) { + dev_err(kbdev->dev, "OPP has invalid core mask of 0\n"); + return -ENODEV; + } + + kbdev->opp_table[i].opp_freq = opp_freq; + kbdev->opp_table[i].real_freq = real_freq; + kbdev->opp_table[i].core_mask = core_mask; + + dev_info(kbdev->dev, "OPP %d : opp_freq=%llu real_freq=%llu core_mask=%llx\n", + i, opp_freq, real_freq, core_mask); + + i++; + } + + kbdev->num_opps = i; + + return 0; +} + +int kbase_devfreq_init(struct kbase_device *kbdev) +{ + struct devfreq_dev_profile *dp; + int err; + + if (!kbdev->clock) { + dev_err(kbdev->dev, "Clock not available for devfreq\n"); + return -ENODEV; + } + + kbdev->current_freq = clk_get_rate(kbdev->clock); + kbdev->current_nominal_freq = kbdev->current_freq; + + dp = &kbdev->devfreq_profile; + + dp->initial_freq = kbdev->current_freq; + dp->polling_ms = 100; + dp->target = kbase_devfreq_target; + dp->get_dev_status = kbase_devfreq_status; + dp->get_cur_freq = kbase_devfreq_cur_freq; + dp->exit = kbase_devfreq_exit; + + if (kbase_devfreq_init_freq_table(kbdev, dp)) + return -EFAULT; + + err = kbase_devfreq_init_core_mask_table(kbdev); + if (err) + return err; + + kbdev->devfreq = devfreq_add_device(kbdev->dev, dp, + "simple_ondemand", NULL); + if (IS_ERR(kbdev->devfreq)) { + kbase_devfreq_term_freq_table(kbdev); + return PTR_ERR(kbdev->devfreq); + } + + /* devfreq_add_device only copies a few of kbdev->dev's fields, so + * set drvdata explicitly so IPA models can access kbdev. */ + dev_set_drvdata(&kbdev->devfreq->dev, kbdev); + + err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq); + if (err) { + dev_err(kbdev->dev, + "Failed to register OPP notifier (%d)\n", err); + goto opp_notifier_failed; + } + +#ifdef CONFIG_DEVFREQ_THERMAL + err = kbase_ipa_init(kbdev); + if (err) { + dev_err(kbdev->dev, "IPA initialization failed\n"); + goto cooling_failed; + } + + kbdev->devfreq_cooling = of_devfreq_cooling_register_power( + kbdev->dev->of_node, + kbdev->devfreq, + &kbase_ipa_power_model_ops); + if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) { + err = PTR_ERR(kbdev->devfreq_cooling); + dev_err(kbdev->dev, + "Failed to register cooling device (%d)\n", + err); + goto cooling_failed; + } +#endif + + return 0; + +#ifdef CONFIG_DEVFREQ_THERMAL +cooling_failed: + devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq); +#endif /* CONFIG_DEVFREQ_THERMAL */ +opp_notifier_failed: + if (devfreq_remove_device(kbdev->devfreq)) + dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err); + else + kbdev->devfreq = NULL; + + return err; +} + +void kbase_devfreq_term(struct kbase_device *kbdev) +{ + int err; + + dev_dbg(kbdev->dev, "Term Mali devfreq\n"); + +#ifdef CONFIG_DEVFREQ_THERMAL + if (kbdev->devfreq_cooling) + devfreq_cooling_unregister(kbdev->devfreq_cooling); + + kbase_ipa_term(kbdev); +#endif + + devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq); + + err = devfreq_remove_device(kbdev->devfreq); + if (err) + dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err); + else + kbdev->devfreq = NULL; + + kfree(kbdev->opp_table); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h new file mode 100644 index 00000000000000..c0bf8b15b3bcd6 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h @@ -0,0 +1,24 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _BASE_DEVFREQ_H_ +#define _BASE_DEVFREQ_H_ + +int kbase_devfreq_init(struct kbase_device *kbdev); +void kbase_devfreq_term(struct kbase_device *kbdev); + +#endif /* _BASE_DEVFREQ_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c new file mode 100644 index 00000000000000..dcdf15cdc3e8d8 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c @@ -0,0 +1,255 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * + */ +#include +#include +#include + +#include + +#if !defined(CONFIG_MALI_NO_MALI) + + +#ifdef CONFIG_DEBUG_FS + + +int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size) +{ + struct kbase_io_access *old_buf; + struct kbase_io_access *new_buf; + unsigned long flags; + + if (!new_size) + goto out_err; /* The new size must not be 0 */ + + new_buf = vmalloc(new_size * sizeof(*h->buf)); + if (!new_buf) + goto out_err; + + spin_lock_irqsave(&h->lock, flags); + + old_buf = h->buf; + + /* Note: we won't bother with copying the old data over. The dumping + * logic wouldn't work properly as it relies on 'count' both as a + * counter and as an index to the buffer which would have changed with + * the new array. This is a corner case that we don't need to support. 
+ */ + h->count = 0; + h->size = new_size; + h->buf = new_buf; + + spin_unlock_irqrestore(&h->lock, flags); + + vfree(old_buf); + + return 0; + +out_err: + return -1; +} + + +int kbase_io_history_init(struct kbase_io_history *h, u16 n) +{ + h->enabled = false; + spin_lock_init(&h->lock); + h->count = 0; + h->size = 0; + h->buf = NULL; + if (kbase_io_history_resize(h, n)) + return -1; + + return 0; +} + + +void kbase_io_history_term(struct kbase_io_history *h) +{ + vfree(h->buf); + h->buf = NULL; +} + + +/* kbase_io_history_add - add new entry to the register access history + * + * @h: Pointer to the history data structure + * @addr: Register address + * @value: The value that is either read from or written to the register + * @write: 1 if it's a register write, 0 if it's a read + */ +static void kbase_io_history_add(struct kbase_io_history *h, + void __iomem const *addr, u32 value, u8 write) +{ + struct kbase_io_access *io; + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + + io = &h->buf[h->count % h->size]; + io->addr = (uintptr_t)addr | write; + io->value = value; + ++h->count; + /* If count overflows, move the index by the buffer size so the entire + * buffer will still be dumped later */ + if (unlikely(!h->count)) + h->count = h->size; + + spin_unlock_irqrestore(&h->lock, flags); +} + + +void kbase_io_history_dump(struct kbase_device *kbdev) +{ + struct kbase_io_history *const h = &kbdev->io_history; + u16 i; + size_t iters; + unsigned long flags; + + if (!unlikely(h->enabled)) + return; + + spin_lock_irqsave(&h->lock, flags); + + dev_err(kbdev->dev, "Register IO History:"); + iters = (h->size > h->count) ? h->count : h->size; + dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters, + h->count); + for (i = 0; i < iters; ++i) { + struct kbase_io_access *io = + &h->buf[(h->count - iters + i) % h->size]; + char const access = (io->addr & 1) ? 
'w' : 'r'; + + dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access, + (void *)(io->addr & ~0x1), io->value); + } + + spin_unlock_irqrestore(&h->lock, flags); +} + + +#endif /* CONFIG_DEBUG_FS */ + + +void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, + struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); + KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(kbdev->dev != NULL); + + writel(value, kbdev->reg + offset); + +#ifdef CONFIG_DEBUG_FS + if (unlikely(kbdev->io_history.enabled)) + kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset, + value, 1); +#endif /* CONFIG_DEBUG_FS */ + dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value); + + if (kctx && kctx->jctx.tb) + kbase_device_trace_register_access(kctx, REG_WRITE, offset, + value); +} + +KBASE_EXPORT_TEST_API(kbase_reg_write); + +u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, + struct kbase_context *kctx) +{ + u32 val; + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); + KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(kbdev->dev != NULL); + + val = readl(kbdev->reg + offset); + +#ifdef CONFIG_DEBUG_FS + if (unlikely(kbdev->io_history.enabled)) + kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset, + val, 0); +#endif /* CONFIG_DEBUG_FS */ + dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val); + + if (kctx && kctx->jctx.tb) + kbase_device_trace_register_access(kctx, REG_READ, offset, val); + return val; +} + +KBASE_EXPORT_TEST_API(kbase_reg_read); +#endif /* !defined(CONFIG_MALI_NO_MALI) */ + +/** + * kbase_report_gpu_fault - Report a GPU fault. + * @kbdev: Kbase device pointer + * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS + * was also set + * + * This function is called from the interrupt handler when a GPU fault occurs. + * It reports the details of the fault using dev_warn(). + */ +static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple) +{ + u32 status; + u64 address; + + status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL); + address = (u64) kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32; + address |= kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL); + + dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx", + status & 0xFF, + kbase_exception_name(kbdev, status), + address); + if (multiple) + dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n"); +} + +void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val) +{ + KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val); + if (val & GPU_FAULT) + kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS); + + if (val & RESET_COMPLETED) + kbase_pm_reset_done(kbdev); + + if (val & PRFCNT_SAMPLE_COMPLETED) + kbase_instr_hwcnt_sample_done(kbdev); + + if (val & CLEAN_CACHES_COMPLETED) + kbase_clean_caches_done(kbdev); + + KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL); + + /* kbase_pm_check_transitions must be called after the IRQ has been + * cleared. This is because it might trigger further power transitions + * and we don't want to miss the interrupt raised to notify us that + * these further transitions have finished. 
+ */ + if (val & POWER_CHANGED_ALL) + kbase_pm_power_changed(kbdev); + + KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h new file mode 100644 index 00000000000000..5b20445932fb62 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h @@ -0,0 +1,67 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Backend-specific HW access device APIs + */ + +#ifndef _KBASE_DEVICE_INTERNAL_H_ +#define _KBASE_DEVICE_INTERNAL_H_ + +/** + * kbase_reg_write - write to GPU register + * @kbdev: Kbase device pointer + * @offset: Offset of register + * @value: Value to write + * @kctx: Kbase context pointer. May be NULL + * + * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If + * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr + * != KBASEP_AS_NR_INVALID). + */ +void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, + struct kbase_context *kctx); + +/** + * kbase_reg_read - read from GPU register + * @kbdev: Kbase device pointer + * @offset: Offset of register + * @kctx: Kbase context pointer. May be NULL + * + * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If + * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr + * != KBASEP_AS_NR_INVALID). + * + * Return: Value in desired register + */ +u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, + struct kbase_context *kctx); + + +/** + * kbase_gpu_interrupt - GPU interrupt handler + * @kbdev: Kbase device pointer + * @val: The value of the GPU IRQ status register which triggered the call + * + * This function is called from the interrupt handler when a GPU irq is to be + * handled. + */ +void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val); + +#endif /* _KBASE_DEVICE_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c new file mode 100644 index 00000000000000..6bceba19ea39a5 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c @@ -0,0 +1,130 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+
+
+/*
+ * Register-based HW access backend APIs
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+int kbase_backend_early_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbasep_platform_device_init(kbdev);
+ if (err)
+ return err;
+
+ err = kbase_pm_runtime_init(kbdev);
+ if (err)
+ goto fail_runtime_pm;
+
+ /* Ensure we can access the GPU registers */
+ kbase_pm_register_access_enable(kbdev);
+
+ /* Find out GPU properties based on the GPU feature registers */
+ kbase_gpuprops_set(kbdev);
+
+ /* We're done accessing the GPU registers for now. */
+ kbase_pm_register_access_disable(kbdev);
+
+ err = kbase_install_interrupts(kbdev);
+ if (err)
+ goto fail_interrupts;
+
+ err = kbase_hwaccess_pm_init(kbdev);
+ if (err)
+ goto fail_pm;
+
+ return 0;
+
+fail_pm:
+ kbase_release_interrupts(kbdev);
+fail_interrupts:
+ kbase_pm_runtime_term(kbdev);
+fail_runtime_pm:
+ kbasep_platform_device_term(kbdev);
+
+ return err;
+}
+
+void kbase_backend_early_term(struct kbase_device *kbdev)
+{
+ kbase_hwaccess_pm_term(kbdev);
+ kbase_release_interrupts(kbdev);
+ kbase_pm_runtime_term(kbdev);
+ kbasep_platform_device_term(kbdev);
+}
+
+int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+ int err;
+
+ err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+ if (err)
+ return err;
+
+ err = kbase_backend_timer_init(kbdev);
+ if (err)
+ goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+ if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+ dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
+ err = -EINVAL;
+ goto fail_interrupt_test;
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ err = kbase_job_slot_init(kbdev);
+ if (err)
+ goto fail_job_slot;
+
+ init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+ return 0;
+
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+ kbase_backend_timer_term(kbdev);
+fail_timer:
+ kbase_hwaccess_pm_halt(kbdev);
+
+ return err;
+}
+
+void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+ kbase_job_slot_halt(kbdev);
+ kbase_job_slot_term(kbdev);
+ kbase_backend_timer_term(kbdev);
+ kbase_hwaccess_pm_halt(kbdev);
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
new file mode 100644
index 00000000000000..b395325b556bb2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ * + */ + + + + + +/* + * Base kernel property query backend APIs + */ + +#include +#include +#include +#include + +void kbase_backend_gpuprops_get(struct kbase_device *kbdev, + struct kbase_gpuprops_regdump *regdump) +{ + int i; + + /* Fill regdump with the content of the relevant registers */ + regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL); + + regdump->l2_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_FEATURES), NULL); + regdump->suspend_size = kbase_reg_read(kbdev, + GPU_CONTROL_REG(SUSPEND_SIZE), NULL); + regdump->tiler_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_FEATURES), NULL); + regdump->mem_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(MEM_FEATURES), NULL); + regdump->mmu_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(MMU_FEATURES), NULL); + regdump->as_present = kbase_reg_read(kbdev, + GPU_CONTROL_REG(AS_PRESENT), NULL); + regdump->js_present = kbase_reg_read(kbdev, + GPU_CONTROL_REG(JS_PRESENT), NULL); + + for (i = 0; i < GPU_MAX_JOB_SLOTS; i++) + regdump->js_features[i] = kbase_reg_read(kbdev, + GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL); + + for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) + regdump->texture_features[i] = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL); + + regdump->thread_max_threads = kbase_reg_read(kbdev, + GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL); + regdump->thread_max_workgroup_size = kbase_reg_read(kbdev, + GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE), + NULL); + regdump->thread_max_barrier_size = kbase_reg_read(kbdev, + GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL); + regdump->thread_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(THREAD_FEATURES), NULL); + + regdump->shader_present_lo = kbase_reg_read(kbdev, + GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL); + regdump->shader_present_hi = kbase_reg_read(kbdev, + GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL); + + regdump->tiler_present_lo = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_PRESENT_LO), NULL); + regdump->tiler_present_hi = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_PRESENT_HI), NULL); + + regdump->l2_present_lo = kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_PRESENT_LO), NULL); + regdump->l2_present_hi = kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_PRESENT_HI), NULL); + + regdump->stack_present_lo = kbase_reg_read(kbdev, + GPU_CONTROL_REG(STACK_PRESENT_LO), NULL); + regdump->stack_present_hi = kbase_reg_read(kbdev, + GPU_CONTROL_REG(STACK_PRESENT_HI), NULL); +} + +void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev, + struct kbase_gpuprops_regdump *regdump) +{ + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) { + /* Ensure we can access the GPU registers */ + kbase_pm_register_access_enable(kbdev); + + regdump->coherency_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(COHERENCY_FEATURES), NULL); + + /* We're done accessing the GPU registers for now. */ + kbase_pm_register_access_disable(kbdev); + } else { + /* Pre COHERENCY_FEATURES we only supported ACE_LITE */ + regdump->coherency_features = + COHERENCY_FEATURE_BIT(COHERENCY_NONE) | + COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE); + } +} + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c new file mode 100644 index 00000000000000..7ad309e8d7f456 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c @@ -0,0 +1,492 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * GPU backend instrumentation APIs. + */ + +#include +#include +#include +#include +#include +#include + +/** + * kbasep_instr_hwcnt_cacheclean - Issue Cache Clean & Invalidate command to + * hardware + * + * @kbdev: Kbase device + */ +static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev) +{ + unsigned long flags; + unsigned long pm_flags; + u32 irq_mask; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_REQUEST_CLEAN); + + /* Enable interrupt */ + spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags); + irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), + irq_mask | CLEAN_CACHES_COMPLETED, NULL); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags); + + /* clean&invalidate the caches so we're sure the mmu tables for the dump + * buffer is valid */ + KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CLEAN_INV_CACHES, NULL); + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING; + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); +} + +int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev, + struct kbase_context *kctx, + struct kbase_uk_hwcnt_setup *setup) +{ + unsigned long flags, pm_flags; + int err = -EINVAL; + u32 irq_mask; + int ret; + u64 shader_cores_needed; + u32 prfcnt_config; + + shader_cores_needed = kbase_pm_get_present_cores(kbdev, + KBASE_PM_CORE_SHADER); + + /* alignment failure */ + if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1))) + goto out_err; + + /* Override core availability policy to ensure all cores are available + */ + kbase_pm_ca_instr_enable(kbdev); + + /* Request the cores early on synchronously - we'll release them on any + * errors (e.g. 
instrumentation already active) */ + kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed); + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) { + /* Instrumentation is already enabled */ + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + goto out_unrequest_cores; + } + + /* Enable interrupt */ + spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags); + irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask | + PRFCNT_SAMPLE_COMPLETED, NULL); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags); + + /* In use, this context is the owner */ + kbdev->hwcnt.kctx = kctx; + /* Remember the dump address so we can reprogram it later */ + kbdev->hwcnt.addr = setup->dump_buffer; + + /* Request the clean */ + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN; + kbdev->hwcnt.backend.triggered = 0; + /* Clean&invalidate the caches so we're sure the mmu tables for the dump + * buffer is valid */ + ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq, + &kbdev->hwcnt.backend.cache_clean_work); + KBASE_DEBUG_ASSERT(ret); + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + /* Wait for cacheclean to complete */ + wait_event(kbdev->hwcnt.backend.wait, + kbdev->hwcnt.backend.triggered != 0); + + KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_IDLE); + + kbase_pm_request_l2_caches(kbdev); + + /* Configure */ + prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT; +#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY + { + u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) + >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; + int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id); + + if (arch_v6) + prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT; + } +#endif + + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), + prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), + setup->dump_buffer & 0xFFFFFFFF, kctx); + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), + setup->dump_buffer >> 32, kctx); + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), + setup->jm_bm, kctx); + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), + setup->shader_bm, kctx); + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), + setup->mmu_l2_bm, kctx); + /* Due to PRLAM-8186 we need to disable the Tiler before we enable the + * HW counter dump. 
*/ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, + kctx); + else + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), + setup->tiler_bm, kctx); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), + prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx); + + /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump + */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186)) + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), + setup->tiler_bm, kctx); + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; + kbdev->hwcnt.backend.triggered = 1; + wake_up(&kbdev->hwcnt.backend.wait); + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + err = 0; + + dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx); + return err; + out_unrequest_cores: + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + out_err: + return err; +} + +int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx) +{ + unsigned long flags, pm_flags; + int err = -EINVAL; + u32 irq_mask; + struct kbase_device *kbdev = kctx->kbdev; + + while (1) { + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) { + /* Instrumentation is not enabled */ + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + goto out; + } + + if (kbdev->hwcnt.kctx != kctx) { + /* Instrumentation has been setup for another context */ + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + goto out; + } + + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) + break; + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + /* Ongoing dump/setup - wait for its completion */ + wait_event(kbdev->hwcnt.backend.wait, + kbdev->hwcnt.backend.triggered != 0); + } + + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED; + kbdev->hwcnt.backend.triggered = 0; + + /* Disable interrupt */ + spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags); + irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), + irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL); + + /* Disable the counters */ + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx); + + kbdev->hwcnt.kctx = NULL; + kbdev->hwcnt.addr = 0ULL; + + kbase_pm_ca_instr_disable(kbdev); + + kbase_pm_unrequest_cores(kbdev, true, + kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER)); + + kbase_pm_release_l2_caches(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags); + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", + kctx); + + err = 0; + + out: + return err; +} + +int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx) +{ + unsigned long flags; + int err = -EINVAL; + struct kbase_device *kbdev = kctx->kbdev; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.kctx != kctx) { + /* The instrumentation has been setup for another context */ + goto unlock; + } + + if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) { + /* HW counters are disabled or another dump is ongoing, or we're + * resetting */ + goto unlock; + } + + kbdev->hwcnt.backend.triggered = 0; + + /* Mark that we're dumping - the PF handler can signal that we faulted + */ + kbdev->hwcnt.backend.state = 
KBASE_INSTR_STATE_DUMPING; + + /* Reconfigure the dump address */ + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), + kbdev->hwcnt.addr & 0xFFFFFFFF, NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), + kbdev->hwcnt.addr >> 32, NULL); + + /* Start dumping */ + KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL, + kbdev->hwcnt.addr, 0); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_PRFCNT_SAMPLE, kctx); + + dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx); + + err = 0; + + unlock: + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + return err; +} +KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump); + +bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, + bool * const success) +{ + unsigned long flags; + bool complete = false; + struct kbase_device *kbdev = kctx->kbdev; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) { + *success = true; + complete = true; + } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { + *success = false; + complete = true; + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; + } + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + return complete; +} +KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete); + +void kbasep_cache_clean_worker(struct work_struct *data) +{ + struct kbase_device *kbdev; + unsigned long flags; + + kbdev = container_of(data, struct kbase_device, + hwcnt.backend.cache_clean_work); + + mutex_lock(&kbdev->cacheclean_lock); + kbasep_instr_hwcnt_cacheclean(kbdev); + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + /* Wait for our condition, and any reset to complete */ + while (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) { + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + wait_event(kbdev->hwcnt.backend.cache_clean_wait, + kbdev->hwcnt.backend.state != + KBASE_INSTR_STATE_CLEANING); + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + } + KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_CLEANED); + + /* All finished and idle */ + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; + kbdev->hwcnt.backend.triggered = 1; + wake_up(&kbdev->hwcnt.backend.wait); + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + mutex_unlock(&kbdev->cacheclean_lock); +} + +void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { + kbdev->hwcnt.backend.triggered = 1; + wake_up(&kbdev->hwcnt.backend.wait); + } else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) { + int ret; + /* Always clean and invalidate the cache after a successful dump + */ + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN; + ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq, + &kbdev->hwcnt.backend.cache_clean_work); + KBASE_DEBUG_ASSERT(ret); + } + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); +} + +void kbase_clean_caches_done(struct kbase_device *kbdev) +{ + u32 irq_mask; + + if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) { + unsigned long flags; + unsigned long pm_flags; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + /* Disable interrupt */ + spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags); + irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), + NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), + irq_mask & ~CLEAN_CACHES_COMPLETED, NULL); + 
spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags); + + /* Wakeup... */ + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_CLEANING) { + /* Only wake if we weren't resetting */ + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANED; + wake_up(&kbdev->hwcnt.backend.cache_clean_wait); + } + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + } +} + +int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx) +{ + struct kbase_device *kbdev = kctx->kbdev; + unsigned long flags; + int err; + + /* Wait for dump & cacheclean to complete */ + wait_event(kbdev->hwcnt.backend.wait, + kbdev->hwcnt.backend.triggered != 0); + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) { + err = -EINVAL; + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE; + } else { + /* Dump done */ + KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_IDLE); + err = 0; + } + + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + return err; +} + +int kbase_instr_hwcnt_clear(struct kbase_context *kctx) +{ + unsigned long flags; + int err = -EINVAL; + struct kbase_device *kbdev = kctx->kbdev; + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + + /* Check it's the context previously set up and we're not already + * dumping */ + if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state != + KBASE_INSTR_STATE_IDLE) + goto out; + + /* Clear the counters */ + KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_PRFCNT_CLEAR, kctx); + + err = 0; + +out: + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + return err; +} +KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear); + +int kbase_instr_backend_init(struct kbase_device *kbdev) +{ + int ret = 0; + + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED; + + init_waitqueue_head(&kbdev->hwcnt.backend.wait); + init_waitqueue_head(&kbdev->hwcnt.backend.cache_clean_wait); + INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work, + kbasep_cache_clean_worker); + kbdev->hwcnt.backend.triggered = 0; + + kbdev->hwcnt.backend.cache_clean_wq = + alloc_workqueue("Mali cache cleaning workqueue", 0, 1); + if (NULL == kbdev->hwcnt.backend.cache_clean_wq) + ret = -EINVAL; + + return ret; +} + +void kbase_instr_backend_term(struct kbase_device *kbdev) +{ + destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq); +} + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h new file mode 100644 index 00000000000000..4794672da8f098 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h @@ -0,0 +1,58 @@ +/* + * + * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+
+/*
+ * Backend-specific instrumentation definitions
+ */
+
+#ifndef _KBASE_INSTR_DEFS_H_
+#define _KBASE_INSTR_DEFS_H_
+
+/*
+ * Instrumentation State Machine States
+ */
+enum kbase_instr_state {
+ /* State where instrumentation is not active */
+ KBASE_INSTR_STATE_DISABLED = 0,
+ /* State machine is active and ready for a command. */
+ KBASE_INSTR_STATE_IDLE,
+ /* Hardware is currently dumping a frame. */
+ KBASE_INSTR_STATE_DUMPING,
+ /* We've requested a clean to occur on a workqueue */
+ KBASE_INSTR_STATE_REQUEST_CLEAN,
+ /* Hardware is currently cleaning and invalidating caches. */
+ KBASE_INSTR_STATE_CLEANING,
+ /* Cache clean completed, and either a) a dump is complete, or
+ * b) instrumentation can now be set up. */
+ KBASE_INSTR_STATE_CLEANED,
+ /* An error has occurred during DUMPING (page fault). */
+ KBASE_INSTR_STATE_FAULT
+};
+
+/* Structure used for instrumentation and HW counters dumping */
+struct kbase_instr_backend {
+ wait_queue_head_t wait;
+ int triggered;
+
+ enum kbase_instr_state state;
+ wait_queue_head_t cache_clean_wait;
+ struct workqueue_struct *cache_clean_wq;
+ struct work_struct cache_clean_work;
+};
+
+#endif /* _KBASE_INSTR_DEFS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
new file mode 100644
index 00000000000000..e96aeae786e1e5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * Backend-specific HW access instrumentation APIs
+ */
+
+#ifndef _KBASE_INSTR_INTERNAL_H_
+#define _KBASE_INSTR_INTERNAL_H_
+
+/**
+ * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
+ * @data: a &struct work_struct
+ */
+void kbasep_cache_clean_worker(struct work_struct *data);
+
+/**
+ * kbase_clean_caches_done() - Cache clean interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_clean_caches_done(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+#endif /* _KBASE_INSTR_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
new file mode 100644
index 00000000000000..8781561e73d001
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ * + */ + + + +/* + * Backend specific IRQ APIs + */ + +#ifndef _KBASE_IRQ_INTERNAL_H_ +#define _KBASE_IRQ_INTERNAL_H_ + +int kbase_install_interrupts(struct kbase_device *kbdev); + +void kbase_release_interrupts(struct kbase_device *kbdev); + +/** + * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed + * execution + * @kbdev: The kbase device + */ +void kbase_synchronize_irqs(struct kbase_device *kbdev); + +int kbasep_common_test_interrupt_handlers( + struct kbase_device * const kbdev); + +#endif /* _KBASE_IRQ_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c new file mode 100644 index 00000000000000..8416b80e8b77d9 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c @@ -0,0 +1,469 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include + +#include + +#if !defined(CONFIG_MALI_NO_MALI) + +/* GPU IRQ Tags */ +#define JOB_IRQ_TAG 0 +#define MMU_IRQ_TAG 1 +#define GPU_IRQ_TAG 2 + +static void *kbase_tag(void *ptr, u32 tag) +{ + return (void *)(((uintptr_t) ptr) | tag); +} + +static void *kbase_untag(void *ptr) +{ + return (void *)(((uintptr_t) ptr) & ~3); +} + +static irqreturn_t kbase_job_irq_handler(int irq, void *data) +{ + unsigned long flags; + struct kbase_device *kbdev = kbase_untag(data); + u32 val; + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!kbdev->pm.backend.gpu_powered) { + /* GPU is turned off - IRQ is not for us */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, + flags); + return IRQ_NONE; + } + + val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL); + +#ifdef CONFIG_MALI_DEBUG + if (!kbdev->pm.backend.driver_ready_for_irqs) + dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n", + __func__, irq, val); +#endif /* CONFIG_MALI_DEBUG */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!val) + return IRQ_NONE; + + dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); + + kbase_job_done(kbdev, val); + + return IRQ_HANDLED; +} + +KBASE_EXPORT_TEST_API(kbase_job_irq_handler); + +static irqreturn_t kbase_mmu_irq_handler(int irq, void *data) +{ + unsigned long flags; + struct kbase_device *kbdev = kbase_untag(data); + u32 val; + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!kbdev->pm.backend.gpu_powered) { + /* GPU is turned off - IRQ is not for us */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, + flags); + return IRQ_NONE; + } + + atomic_inc(&kbdev->faults_pending); + + val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL); + +#ifdef CONFIG_MALI_DEBUG + if (!kbdev->pm.backend.driver_ready_for_irqs) + dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n", + __func__, irq, val); +#endif /* CONFIG_MALI_DEBUG */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!val) { + atomic_dec(&kbdev->faults_pending); + return IRQ_NONE; 
+ } + + dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); + + kbase_mmu_interrupt(kbdev, val); + + atomic_dec(&kbdev->faults_pending); + + return IRQ_HANDLED; +} + +static irqreturn_t kbase_gpu_irq_handler(int irq, void *data) +{ + unsigned long flags; + struct kbase_device *kbdev = kbase_untag(data); + u32 val; + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!kbdev->pm.backend.gpu_powered) { + /* GPU is turned off - IRQ is not for us */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, + flags); + return IRQ_NONE; + } + + val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL); + +#ifdef CONFIG_MALI_DEBUG + if (!kbdev->pm.backend.driver_ready_for_irqs) + dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n", + __func__, irq, val); +#endif /* CONFIG_MALI_DEBUG */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (!val) + return IRQ_NONE; + + dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val); + + kbase_gpu_interrupt(kbdev, val); + + return IRQ_HANDLED; +} + +KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler); + +static irq_handler_t kbase_handler_table[] = { + [JOB_IRQ_TAG] = kbase_job_irq_handler, + [MMU_IRQ_TAG] = kbase_mmu_irq_handler, + [GPU_IRQ_TAG] = kbase_gpu_irq_handler, +}; + +#ifdef CONFIG_MALI_DEBUG +#define JOB_IRQ_HANDLER JOB_IRQ_TAG +#define MMU_IRQ_HANDLER MMU_IRQ_TAG +#define GPU_IRQ_HANDLER GPU_IRQ_TAG + +/** + * kbase_set_custom_irq_handler - Set a custom IRQ handler + * @kbdev: Device for which the handler is to be registered + * @custom_handler: Handler to be registered + * @irq_type: Interrupt type + * + * Registers given interrupt handler for requested interrupt type + * In the case where irq handler is not specified, the default handler shall be + * registered + * + * Return: 0 case success, error code otherwise + */ +int kbase_set_custom_irq_handler(struct kbase_device *kbdev, + irq_handler_t custom_handler, + int irq_type) +{ + int result = 0; + irq_handler_t requested_irq_handler = NULL; + + KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) && + (GPU_IRQ_HANDLER >= irq_type)); + + /* Release previous handler */ + if (kbdev->irqs[irq_type].irq) + free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type)); + + requested_irq_handler = (NULL != custom_handler) ? 
custom_handler :
+ kbase_handler_table[irq_type];
+
+ if (0 != request_irq(kbdev->irqs[irq_type].irq,
+ requested_irq_handler,
+ kbdev->irqs[irq_type].flags | IRQF_SHARED,
+ dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+ result = -EINVAL;
+ dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ kbdev->irqs[irq_type].irq, irq_type);
+#ifdef CONFIG_SPARSE_IRQ
+ dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+ }
+
+ return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
+
+/* test correct interrupt assignment and reception by cpu */
+struct kbasep_irq_test {
+ struct hrtimer timer;
+ wait_queue_head_t wait;
+ int triggered;
+ u32 timeout;
+};
+
+static struct kbasep_irq_test kbasep_irq_test_data;
+
+#define IRQ_TEST_TIMEOUT 500
+
+static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct kbase_device *kbdev = kbase_untag(data);
+ u32 val;
+
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!kbdev->pm.backend.gpu_powered) {
+ /* GPU is turned off - IRQ is not for us */
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+ flags);
+ return IRQ_NONE;
+ }
+
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+ if (!val)
+ return IRQ_NONE;
+
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+ kbasep_irq_test_data.triggered = 1;
+ wake_up(&kbasep_irq_test_data.wait);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
+{
+ struct kbasep_irq_test *test_data = container_of(timer,
+ struct kbasep_irq_test, timer);
+
+ test_data->timeout = 1;
+ test_data->triggered = 1;
+ wake_up(&test_data->wait);
+ return HRTIMER_NORESTART;
+}
+
+static int kbasep_common_test_interrupt(
+ struct kbase_device * const kbdev, u32 tag)
+{
+ int err = 0;
+ irq_handler_t test_handler;
+
+ u32 old_mask_val;
+ u16 mask_offset;
+ u16 rawstat_offset;
+
+ switch (tag) {
+ case JOB_IRQ_TAG:
+ test_handler = kbase_job_irq_test_handler;
+ rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
+ mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
+ break;
+ case MMU_IRQ_TAG:
+ test_handler = kbase_mmu_irq_test_handler;
+ rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
+ mask_offset = MMU_REG(MMU_IRQ_MASK);
+ break;
+ case GPU_IRQ_TAG:
+ /* already tested by pm_driver - bail out */
+ default:
+ return 0;
+ }
+
+ /* store old mask */
+ old_mask_val = kbase_reg_read(kbdev,
mask_offset, NULL); + /* mask interrupts */ + kbase_reg_write(kbdev, mask_offset, 0x0, NULL); + + if (kbdev->irqs[tag].irq) { + /* release original handler and install test handler */ + if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) { + err = -EINVAL; + } else { + kbasep_irq_test_data.timeout = 0; + hrtimer_init(&kbasep_irq_test_data.timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kbasep_irq_test_data.timer.function = + kbasep_test_interrupt_timeout; + + /* trigger interrupt */ + kbase_reg_write(kbdev, mask_offset, 0x1, NULL); + kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL); + + hrtimer_start(&kbasep_irq_test_data.timer, + HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT), + HRTIMER_MODE_REL); + + wait_event(kbasep_irq_test_data.wait, + kbasep_irq_test_data.triggered != 0); + + if (kbasep_irq_test_data.timeout != 0) { + dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", + kbdev->irqs[tag].irq, tag); + err = -EINVAL; + } else { + dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", + kbdev->irqs[tag].irq, tag); + } + + hrtimer_cancel(&kbasep_irq_test_data.timer); + kbasep_irq_test_data.triggered = 0; + + /* mask interrupts */ + kbase_reg_write(kbdev, mask_offset, 0x0, NULL); + + /* release test handler */ + free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag)); + } + + /* restore original interrupt */ + if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag], + kbdev->irqs[tag].flags | IRQF_SHARED, + dev_name(kbdev->dev), kbase_tag(kbdev, tag))) { + dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n", + kbdev->irqs[tag].irq, tag); + err = -EINVAL; + } + } + /* restore old mask */ + kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL); + + return err; +} + +int kbasep_common_test_interrupt_handlers( + struct kbase_device * const kbdev) +{ + int err; + + init_waitqueue_head(&kbasep_irq_test_data.wait); + kbasep_irq_test_data.triggered = 0; + + /* A suspend won't happen during startup/insmod */ + kbase_pm_context_active(kbdev); + + err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG); + if (err) { + dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n"); + goto out; + } + + err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG); + if (err) { + dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. 
Check interrupt assignments.\n"); + goto out; + } + + dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n"); + + out: + kbase_pm_context_idle(kbdev); + + return err; +} +#endif /* CONFIG_MALI_DEBUG */ + +int kbase_install_interrupts(struct kbase_device *kbdev) +{ + u32 nr = ARRAY_SIZE(kbase_handler_table); + int err; + u32 i; + + for (i = 0; i < nr; i++) { + err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i], + kbdev->irqs[i].flags | IRQF_SHARED, + dev_name(kbdev->dev), + kbase_tag(kbdev, i)); + if (err) { + dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n", + kbdev->irqs[i].irq, i); +#ifdef CONFIG_SPARSE_IRQ + dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n"); +#endif /* CONFIG_SPARSE_IRQ */ + goto release; + } + } + + return 0; + + release: + while (i-- > 0) + free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i)); + + return err; +} + +void kbase_release_interrupts(struct kbase_device *kbdev) +{ + u32 nr = ARRAY_SIZE(kbase_handler_table); + u32 i; + + for (i = 0; i < nr; i++) { + if (kbdev->irqs[i].irq) + free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i)); + } +} + +void kbase_synchronize_irqs(struct kbase_device *kbdev) +{ + u32 nr = ARRAY_SIZE(kbase_handler_table); + u32 i; + + for (i = 0; i < nr; i++) { + if (kbdev->irqs[i].irq) + synchronize_irq(kbdev->irqs[i].irq); + } +} + +#endif /* !defined(CONFIG_MALI_NO_MALI) */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c new file mode 100644 index 00000000000000..c660c80341f476 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c @@ -0,0 +1,235 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Register backend context / address space management + */ + +#include +#include +#include + +/** + * assign_and_activate_kctx_addr_space - Assign an AS to a context + * @kbdev: Kbase device + * @kctx: Kbase context + * @current_as: Address Space to assign + * + * Assign an Address Space (AS) to a context, and add the context to the Policy. + * + * This includes + * setting up the global runpool_irq structure and the context on the AS, + * Activating the MMU on the AS, + * Allowing jobs to be submitted on the AS. 
+ * + * Context: + * kbasep_js_kctx_info.jsctx_mutex held, + * kbasep_js_device_data.runpool_mutex held, + * AS transaction mutex held, + * Runpool IRQ lock held + */ +static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev, + struct kbase_context *kctx, + struct kbase_as *current_as) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); + lockdep_assert_held(&js_devdata->runpool_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* Attribute handling */ + kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx); + + /* Allow it to run jobs */ + kbasep_js_set_submit_allowed(js_devdata, kctx); + + kbase_js_runpool_inc_context_count(kbdev, kctx); +} + +bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + int i; + + if (kbdev->hwaccess.active_kctx == kctx) { + /* Context is already active */ + return true; + } + + for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { + if (kbdev->as_to_kctx[i] == kctx) { + /* Context already has ASID - mark as active */ + return true; + } + } + + /* Context does not have address space assigned */ + return false; +} + +void kbase_backend_release_ctx_irq(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + int as_nr = kctx->as_nr; + + if (as_nr == KBASEP_AS_NR_INVALID) { + WARN(1, "Attempting to release context without ASID\n"); + return; + } + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (atomic_read(&kctx->refcount) != 1) { + WARN(1, "Attempting to release active ASID\n"); + return; + } + + kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx); + + kbase_ctx_sched_release_ctx(kctx); + kbase_js_runpool_dec_context_count(kbdev, kctx); +} + +void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ +} + +int kbase_backend_find_and_release_free_address_space( + struct kbase_device *kbdev, struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + unsigned long flags; + int i; + + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { + struct kbasep_js_kctx_info *as_js_kctx_info; + struct kbase_context *as_kctx; + + as_kctx = kbdev->as_to_kctx[i]; + as_js_kctx_info = &as_kctx->jctx.sched_info; + + /* Don't release privileged or active contexts, or contexts with + * jobs running. + * Note that a context will have at least 1 reference (which + * was previously taken by kbasep_js_schedule_ctx()) until + * descheduled. 
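
For illustration only (not driver code, names are made up): the address-space eviction rule described above boils down to scanning the per-AS context table for an entry that is neither privileged nor referenced by anything except the scheduler's own single reference.

#include <stdbool.h>
#include <stddef.h>

struct demo_ctx {
	bool privileged;
	int refcount;	/* 1 == only the scheduler still holds it */
};

/* Return the index of a reclaimable address space, or -1 if none. */
static int pick_victim_as(struct demo_ctx *as_to_ctx[], size_t nr_as)
{
	for (size_t i = 0; i < nr_as; i++) {
		struct demo_ctx *ctx = as_to_ctx[i];

		if (ctx && !ctx->privileged && ctx->refcount == 1)
			return (int)i;
	}
	return -1;
}
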
+ */ + if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) && + atomic_read(&as_kctx->refcount) == 1) { + if (!kbasep_js_runpool_retain_ctx_nolock(kbdev, + as_kctx)) { + WARN(1, "Failed to retain active context\n"); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, + flags); + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + return KBASEP_AS_NR_INVALID; + } + + kbasep_js_clear_submit_allowed(js_devdata, as_kctx); + + /* Drop and retake locks to take the jsctx_mutex on the + * context we're about to release without violating lock + * ordering + */ + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + + /* Release context from address space */ + mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + + kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx); + + if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) { + kbasep_js_runpool_requeue_or_kill_ctx(kbdev, + as_kctx, + true); + + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex); + + return i; + } + + /* Context was retained while locks were dropped, + * continue looking for free AS */ + + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex); + + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + } + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + return KBASEP_AS_NR_INVALID; +} + +bool kbase_backend_use_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx, + int as_nr) +{ + struct kbasep_js_device_data *js_devdata; + struct kbase_as *new_address_space = NULL; + + js_devdata = &kbdev->js_data; + + if (kbdev->hwaccess.active_kctx == kctx) { + WARN(1, "Context is already scheduled in\n"); + return false; + } + + new_address_space = &kbdev->as[as_nr]; + + lockdep_assert_held(&js_devdata->runpool_mutex); + lockdep_assert_held(&kbdev->mmu_hw_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space); + + if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) { + /* We need to retain it to keep the corresponding address space + */ + kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); + } + + return true; +} + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h new file mode 100644 index 00000000000000..08a7400e66d588 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h @@ -0,0 +1,123 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + +/* + * Register-based HW access backend specific definitions + */ + +#ifndef _KBASE_HWACCESS_GPU_DEFS_H_ +#define _KBASE_HWACCESS_GPU_DEFS_H_ + +/* SLOT_RB_SIZE must be < 256 */ +#define SLOT_RB_SIZE 2 +#define SLOT_RB_MASK (SLOT_RB_SIZE - 1) + +/** + * struct rb_entry - Ringbuffer entry + * @katom: Atom associated with this entry + */ +struct rb_entry { + struct kbase_jd_atom *katom; +}; + +/** + * struct slot_rb - Slot ringbuffer + * @entries: Ringbuffer entries + * @last_context: The last context to submit a job on this slot + * @read_idx: Current read index of buffer + * @write_idx: Current write index of buffer + * @job_chain_flag: Flag used to implement jobchain disambiguation + */ +struct slot_rb { + struct rb_entry entries[SLOT_RB_SIZE]; + + struct kbase_context *last_context; + + u8 read_idx; + u8 write_idx; + + u8 job_chain_flag; +}; + +/** + * struct kbase_backend_data - GPU backend specific data for HW access layer + * @slot_rb: Slot ringbuffers + * @rmu_workaround_flag: When PRLAM-8987 is present, this flag determines + * whether slots 0/1 or slot 2 are currently being + * pulled from + * @scheduling_timer: The timer tick used for rescheduling jobs + * @timer_running: Is the timer running? The runpool_mutex must be + * held whilst modifying this. + * @suspend_timer: Is the timer suspended? Set when a suspend + * occurs and cleared on resume. The runpool_mutex + * must be held whilst modifying this. + * @reset_gpu: Set to a KBASE_RESET_xxx value (see comments) + * @reset_workq: Work queue for performing the reset + * @reset_work: Work item for performing the reset + * @reset_wait: Wait event signalled when the reset is complete + * @reset_timer: Timeout for soft-stops before the reset + * @timeouts_updated: Have timeout values just been updated? + * + * The hwaccess_lock (a spinlock) must be held when accessing this structure + */ +struct kbase_backend_data { + struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS]; + + bool rmu_workaround_flag; + + struct hrtimer scheduling_timer; + + bool timer_running; + bool suspend_timer; + + atomic_t reset_gpu; + +/* The GPU reset isn't pending */ +#define KBASE_RESET_GPU_NOT_PENDING 0 +/* kbase_prepare_to_reset_gpu has been called */ +#define KBASE_RESET_GPU_PREPARED 1 +/* kbase_reset_gpu has been called - the reset will now definitely happen + * within the timeout period */ +#define KBASE_RESET_GPU_COMMITTED 2 +/* The GPU reset process is currently occuring (timeout has expired or + * kbasep_try_reset_gpu_early was called) */ +#define KBASE_RESET_GPU_HAPPENING 3 +/* Reset the GPU silently, used when resetting the GPU as part of normal + * behavior (e.g. when exiting protected mode). */ +#define KBASE_RESET_GPU_SILENT 4 + struct workqueue_struct *reset_workq; + struct work_struct reset_work; + wait_queue_head_t reset_wait; + struct hrtimer reset_timer; + + bool timeouts_updated; +}; + +/** + * struct kbase_jd_atom_backend - GPU backend specific katom data + */ +struct kbase_jd_atom_backend { +}; + +/** + * struct kbase_context_backend - GPU backend specific context data + */ +struct kbase_context_backend { +}; + +#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c new file mode 100644 index 00000000000000..0a2a0b7b9965dd --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c @@ -0,0 +1,1459 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Base kernel job manager APIs + */ + +#include +#include +#include +#if defined(CONFIG_MALI_GATOR_SUPPORT) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define beenthere(kctx, f, a...) \ + dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) + +#if KBASE_GPU_RESET_EN +static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev); +static void kbasep_reset_timeout_worker(struct work_struct *data); +static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer); +#endif /* KBASE_GPU_RESET_EN */ + +static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js, + struct kbase_context *kctx) +{ + return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx); +} + +void kbase_job_hw_submit(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + int js) +{ + struct kbase_context *kctx; + u32 cfg; + u64 jc_head = katom->jc; + + KBASE_DEBUG_ASSERT(kbdev); + KBASE_DEBUG_ASSERT(katom); + + kctx = katom->kctx; + + /* Command register must be available */ + KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx)); + /* Affinity is not violating */ + kbase_js_debug_log_current_affinities(kbdev); + KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js, + katom->affinity)); + + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), + jc_head & 0xFFFFFFFF, kctx); + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), + jc_head >> 32, kctx); + + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO), + katom->affinity & 0xFFFFFFFF, kctx); + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI), + katom->affinity >> 32, kctx); + + /* start MMU, medium priority, cache clean/flush on end, clean/flush on + * start */ + cfg = kctx->as_nr; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) && + !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET)) + cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION; + + if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START)) + cfg |= JS_CONFIG_START_FLUSH_NO_ACTION; + else + cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE; + + if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END) && + !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET)) + cfg |= JS_CONFIG_END_FLUSH_NO_ACTION; + else + cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE; + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649)) + cfg |= JS_CONFIG_START_MMU; + + cfg |= JS_CONFIG_THREAD_PRI(8); + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) && + (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED)) + cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK; + + if (kbase_hw_has_feature(kbdev, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { + if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) { + cfg |= JS_CONFIG_JOB_CHAIN_FLAG; + katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN; + kbdev->hwaccess.backend.slot_rb[js].job_chain_flag = + true; + } else { + katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN; + kbdev->hwaccess.backend.slot_rb[js].job_chain_flag = + false; + } + } + + kbase_reg_write(kbdev, JOB_SLOT_REG(js, 
JS_CONFIG_NEXT), cfg, kctx); + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT), + katom->flush_id, kctx); + + /* Write an approximate start timestamp. + * It's approximate because there might be a job in the HEAD register. + */ + katom->start_timestamp = ktime_get(); + + /* GO ! */ + dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", + katom, kctx, js, jc_head, katom->affinity); + + KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js, + (u32) katom->affinity); + +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_job_slots_event( + GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js), + kctx, kbase_jd_atom_id(kctx, katom)); +#endif + KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(katom, jc_head, + katom->affinity, cfg); + KBASE_TLSTREAM_TL_RET_CTX_LPU( + kctx, + &kbdev->gpu_props.props.raw_props.js_features[ + katom->slot_nr]); + KBASE_TLSTREAM_TL_RET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]); + KBASE_TLSTREAM_TL_RET_ATOM_LPU( + katom, + &kbdev->gpu_props.props.raw_props.js_features[js], + "ctx_nr,atom_nr"); +#ifdef CONFIG_GPU_TRACEPOINTS + if (!kbase_backend_nr_atoms_submitted(kbdev, js)) { + /* If this is the only job on the slot, trace it as starting */ + char js_string[16]; + + trace_gpu_sched_switch( + kbasep_make_job_slot_string(js, js_string, + sizeof(js_string)), + ktime_to_ns(katom->start_timestamp), + (u32)katom->kctx->id, 0, katom->work_id); + kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx; + } +#endif + kbase_timeline_job_slot_submit(kbdev, kctx, katom, js); + + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), + JS_COMMAND_START, katom->kctx); +} + +/** + * kbasep_job_slot_update_head_start_timestamp - Update timestamp + * @kbdev: kbase device + * @js: job slot + * @end_timestamp: timestamp + * + * Update the start_timestamp of the job currently in the HEAD, based on the + * fact that we got an IRQ for the previous set of completed jobs. + * + * The estimate also takes into account the time the job was submitted, to + * work out the best estimate (which might still result in an over-estimate to + * the calculated time spent) + */ +static void kbasep_job_slot_update_head_start_timestamp( + struct kbase_device *kbdev, + int js, + ktime_t end_timestamp) +{ + if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) { + struct kbase_jd_atom *katom; + ktime_t timestamp_diff; + /* The atom in the HEAD */ + katom = kbase_gpu_inspect(kbdev, js, 0); + + KBASE_DEBUG_ASSERT(katom != NULL); + + timestamp_diff = ktime_sub(end_timestamp, + katom->start_timestamp); + if (ktime_to_ns(timestamp_diff) >= 0) { + /* Only update the timestamp if it's a better estimate + * than what's currently stored. This is because our + * estimate that accounts for the throttle time may be + * too much of an overestimate */ + katom->start_timestamp = end_timestamp; + } + } +} + +/** + * kbasep_trace_tl_event_lpu_softstop - Call event_lpu_softstop timeline + * tracepoint + * @kbdev: kbase device + * @js: job slot + * + * Make a tracepoint call to the instrumentation module informing that + * softstop happened on given lpu (job slot). 
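
As a hedged aside, the submission path above programs each 64-bit value (job chain head, affinity) as two 32-bit halves before kicking the slot. A minimal standalone sketch of that split, with a fake register struct and illustrative names rather than the real register map:

#include <stdint.h>

struct fake_slot_regs {
	uint32_t head_next_lo;
	uint32_t head_next_hi;
};

static void program_head_next(struct fake_slot_regs *regs, uint64_t jc_head)
{
	regs->head_next_lo = (uint32_t)(jc_head & 0xFFFFFFFFu);
	regs->head_next_hi = (uint32_t)(jc_head >> 32);
	/* Only after both halves and the config word are written does the
	 * real code issue the START command on JS_COMMAND_NEXT. */
}
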
+ */ +static void kbasep_trace_tl_event_lpu_softstop(struct kbase_device *kbdev, + int js) +{ + KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( + &kbdev->gpu_props.props.raw_props.js_features[js]); +} + +void kbase_job_done(struct kbase_device *kbdev, u32 done) +{ + unsigned long flags; + int i; + u32 count = 0; + ktime_t end_timestamp = ktime_get(); + + KBASE_DEBUG_ASSERT(kbdev); + + KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done); + + memset(&kbdev->slot_submit_count_irq[0], 0, + sizeof(kbdev->slot_submit_count_irq)); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + while (done) { + u32 failed = done >> 16; + + /* treat failed slots as finished slots */ + u32 finished = (done & 0xFFFF) | failed; + + /* Note: This is inherently unfair, as we always check + * for lower numbered interrupts before the higher + * numbered ones.*/ + i = ffs(finished) - 1; + KBASE_DEBUG_ASSERT(i >= 0); + + do { + int nr_done; + u32 active; + u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */ + u64 job_tail = 0; + + if (failed & (1u << i)) { + /* read out the job slot status code if the job + * slot reported failure */ + completion_code = kbase_reg_read(kbdev, + JOB_SLOT_REG(i, JS_STATUS), NULL); + + switch (completion_code) { + case BASE_JD_EVENT_STOPPED: +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_job_slots_event( + GATOR_MAKE_EVENT( + GATOR_JOB_SLOT_SOFT_STOPPED, i), + NULL, 0); +#endif + + kbasep_trace_tl_event_lpu_softstop( + kbdev, i); + + /* Soft-stopped job - read the value of + * JS_TAIL so that the job chain can + * be resumed */ + job_tail = (u64)kbase_reg_read(kbdev, + JOB_SLOT_REG(i, JS_TAIL_LO), + NULL) | + ((u64)kbase_reg_read(kbdev, + JOB_SLOT_REG(i, JS_TAIL_HI), + NULL) << 32); + break; + case BASE_JD_EVENT_NOT_STARTED: + /* PRLAM-10673 can cause a TERMINATED + * job to come back as NOT_STARTED, but + * the error interrupt helps us detect + * it */ + completion_code = + BASE_JD_EVENT_TERMINATED; + /* fall through */ + default: + dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)", + i, completion_code, + kbase_exception_name + (kbdev, + completion_code)); + } + + kbase_gpu_irq_evict(kbdev, i); + } + + kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), + done & ((1 << i) | (1 << (i + 16))), + NULL); + active = kbase_reg_read(kbdev, + JOB_CONTROL_REG(JOB_IRQ_JS_STATE), + NULL); + + if (((active >> i) & 1) == 0 && + (((done >> (i + 16)) & 1) == 0)) { + /* There is a potential race we must work + * around: + * + * 1. A job slot has a job in both current and + * next registers + * 2. The job in current completes + * successfully, the IRQ handler reads + * RAWSTAT and calls this function with the + * relevant bit set in "done" + * 3. The job in the next registers becomes the + * current job on the GPU + * 4. Sometime before the JOB_IRQ_CLEAR line + * above the job on the GPU _fails_ + * 5. The IRQ_CLEAR clears the done bit but not + * the failed bit. This atomically sets + * JOB_IRQ_JS_STATE. However since both jobs + * have now completed the relevant bits for + * the slot are set to 0. + * + * If we now did nothing then we'd incorrectly + * assume that _both_ jobs had completed + * successfully (since we haven't yet observed + * the fail bit being set in RAWSTAT). + * + * So at this point if there are no active jobs + * left we check to see if RAWSTAT has a failure + * bit set for the job slot. 
If it does we know + * that there has been a new failure that we + * didn't previously know about, so we make sure + * that we record this in active (but we wait + * for the next loop to deal with it). + * + * If we were handling a job failure (i.e. done + * has the relevant high bit set) then we know + * that the value read back from + * JOB_IRQ_JS_STATE is the correct number of + * remaining jobs because the failed job will + * have prevented any futher jobs from starting + * execution. + */ + u32 rawstat = kbase_reg_read(kbdev, + JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL); + + if ((rawstat >> (i + 16)) & 1) { + /* There is a failed job that we've + * missed - add it back to active */ + active |= (1u << i); + } + } + + dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n", + completion_code); + + nr_done = kbase_backend_nr_atoms_submitted(kbdev, i); + nr_done -= (active >> i) & 1; + nr_done -= (active >> (i + 16)) & 1; + + if (nr_done <= 0) { + dev_warn(kbdev->dev, "Spurious interrupt on slot %d", + i); + + goto spurious; + } + + count += nr_done; + + while (nr_done) { + if (nr_done == 1) { + kbase_gpu_complete_hw(kbdev, i, + completion_code, + job_tail, + &end_timestamp); + kbase_jm_try_kick_all(kbdev); + } else { + /* More than one job has completed. + * Since this is not the last job being + * reported this time it must have + * passed. This is because the hardware + * will not allow further jobs in a job + * slot to complete until the failed job + * is cleared from the IRQ status. + */ + kbase_gpu_complete_hw(kbdev, i, + BASE_JD_EVENT_DONE, + 0, + &end_timestamp); + } + nr_done--; + } + spurious: + done = kbase_reg_read(kbdev, + JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) { + /* Workaround for missing interrupt caused by + * PRLAM-10883 */ + if (((active >> i) & 1) && (0 == + kbase_reg_read(kbdev, + JOB_SLOT_REG(i, + JS_STATUS), NULL))) { + /* Force job slot to be processed again + */ + done |= (1u << i); + } + } + + failed = done >> 16; + finished = (done & 0xFFFF) | failed; + if (done) + end_timestamp = ktime_get(); + } while (finished & (1 << i)); + + kbasep_job_slot_update_head_start_timestamp(kbdev, i, + end_timestamp); + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +#if KBASE_GPU_RESET_EN + if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) == + KBASE_RESET_GPU_COMMITTED) { + /* If we're trying to reset the GPU then we might be able to do + * it early (without waiting for a timeout) because some jobs + * have completed + */ + kbasep_try_reset_gpu_early(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count); +} +KBASE_EXPORT_TEST_API(kbase_job_done); + +static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + bool soft_stops_allowed = true; + + if (kbase_jd_katom_is_protected(katom)) { + soft_stops_allowed = false; + } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) { + if ((katom->core_req & BASE_JD_REQ_T) != 0) + soft_stops_allowed = false; + } + return soft_stops_allowed; +} + +static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev, + base_jd_core_req core_reqs) +{ + bool hard_stops_allowed = true; + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) { + if ((core_reqs & BASE_JD_REQ_T) != 0) + hard_stops_allowed = false; + } + return hard_stops_allowed; +} + +void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev, + int js, + u32 action, + base_jd_core_req core_reqs, + struct 
kbase_jd_atom *target_katom) +{ + struct kbase_context *kctx = target_katom->kctx; +#if KBASE_TRACE_ENABLE + u32 status_reg_before; + u64 job_in_head_before; + u32 status_reg_after; + + KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK))); + + /* Check the head pointer */ + job_in_head_before = ((u64) kbase_reg_read(kbdev, + JOB_SLOT_REG(js, JS_HEAD_LO), NULL)) + | (((u64) kbase_reg_read(kbdev, + JOB_SLOT_REG(js, JS_HEAD_HI), NULL)) + << 32); + status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), + NULL); +#endif + + if (action == JS_COMMAND_SOFT_STOP) { + bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, + target_katom); + + if (!soft_stop_allowed) { +#ifdef CONFIG_MALI_DEBUG + dev_dbg(kbdev->dev, + "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", + (unsigned int)core_reqs); +#endif /* CONFIG_MALI_DEBUG */ + return; + } + + /* We are about to issue a soft stop, so mark the atom as having + * been soft stopped */ + target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED; + + /* Mark the point where we issue the soft-stop command */ + KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(target_katom); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) { + int i; + + for (i = 0; + i < kbase_backend_nr_atoms_submitted(kbdev, js); + i++) { + struct kbase_jd_atom *katom; + + katom = kbase_gpu_inspect(kbdev, js, i); + + KBASE_DEBUG_ASSERT(katom); + + /* For HW_ISSUE_8316, only 'bad' jobs attacking + * the system can cause this issue: normally, + * all memory should be allocated in multiples + * of 4 pages, and growable memory should be + * changed size in multiples of 4 pages. + * + * Whilst such 'bad' jobs can be cleared by a + * GPU reset, the locking up of a uTLB entry + * caused by the bad job could also stall other + * ASs, meaning that other ASs' jobs don't + * complete in the 'grace' period before the + * reset. We don't want to lose other ASs' jobs + * when they would normally complete fine, so we + * must 'poke' the MMU regularly to help other + * ASs complete */ + kbase_as_poking_timer_retain_atom( + kbdev, katom->kctx, katom); + } + } + + if (kbase_hw_has_feature( + kbdev, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { + action = (target_katom->atom_flags & + KBASE_KATOM_FLAGS_JOBCHAIN) ? + JS_COMMAND_SOFT_STOP_1 : + JS_COMMAND_SOFT_STOP_0; + } + } else if (action == JS_COMMAND_HARD_STOP) { + bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev, + core_reqs); + + if (!hard_stop_allowed) { + /* Jobs can be hard-stopped for the following reasons: + * * CFS decides the job has been running too long (and + * soft-stop has not occurred). In this case the GPU + * will be reset by CFS if the job remains on the + * GPU. + * + * * The context is destroyed, kbase_jd_zap_context + * will attempt to hard-stop the job. However it also + * has a watchdog which will cause the GPU to be + * reset if the job remains on the GPU. + * + * * An (unhandled) MMU fault occurred. As long as + * BASE_HW_ISSUE_8245 is defined then the GPU will be + * reset. + * + * All three cases result in the GPU being reset if the + * hard-stop fails, so it is safe to just return and + * ignore the hard-stop request. + */ + dev_warn(kbdev->dev, + "Attempt made to hard-stop a job that cannot be hard-stopped. 
core_reqs = 0x%X", + (unsigned int)core_reqs); + return; + } + target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED; + + if (kbase_hw_has_feature( + kbdev, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { + action = (target_katom->atom_flags & + KBASE_KATOM_FLAGS_JOBCHAIN) ? + JS_COMMAND_HARD_STOP_1 : + JS_COMMAND_HARD_STOP_0; + } + } + + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx); + +#if KBASE_TRACE_ENABLE + status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), + NULL); + if (status_reg_after == BASE_JD_EVENT_ACTIVE) { + struct kbase_jd_atom *head; + struct kbase_context *head_kctx; + + head = kbase_gpu_inspect(kbdev, js, 0); + head_kctx = head->kctx; + + if (status_reg_before == BASE_JD_EVENT_ACTIVE) + KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx, + head, job_in_head_before, js); + else + KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, + 0, js); + + switch (action) { + case JS_COMMAND_SOFT_STOP: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx, + head, head->jc, js); + break; + case JS_COMMAND_SOFT_STOP_0: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx, + head, head->jc, js); + break; + case JS_COMMAND_SOFT_STOP_1: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx, + head, head->jc, js); + break; + case JS_COMMAND_HARD_STOP: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx, + head, head->jc, js); + break; + case JS_COMMAND_HARD_STOP_0: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx, + head, head->jc, js); + break; + case JS_COMMAND_HARD_STOP_1: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx, + head, head->jc, js); + break; + default: + BUG(); + break; + } + } else { + if (status_reg_before == BASE_JD_EVENT_ACTIVE) + KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, + job_in_head_before, js); + else + KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, + 0, js); + + switch (action) { + case JS_COMMAND_SOFT_STOP: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0, + js); + break; + case JS_COMMAND_SOFT_STOP_0: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL, + 0, js); + break; + case JS_COMMAND_SOFT_STOP_1: + KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL, + 0, js); + break; + case JS_COMMAND_HARD_STOP: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0, + js); + break; + case JS_COMMAND_HARD_STOP_0: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL, + 0, js); + break; + case JS_COMMAND_HARD_STOP_1: + KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL, + 0, js); + break; + default: + BUG(); + break; + } + } +#endif +} + +void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx) +{ + unsigned long flags; + struct kbase_device *kbdev; + int i; + + KBASE_DEBUG_ASSERT(kctx != NULL); + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev != NULL); + + /* Cancel any remaining running jobs for this kctx */ + mutex_lock(&kctx->jctx.lock); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* Invalidate all jobs in context, to prevent re-submitting */ + for (i = 0; i < BASE_JD_ATOM_COUNT; i++) { + if (!work_pending(&kctx->jctx.atoms[i].work)) + kctx->jctx.atoms[i].event_code = + BASE_JD_EVENT_JOB_CANCELLED; + } + + for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) + kbase_job_slot_hardstop(kctx, i, NULL); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kctx->jctx.lock); +} + +void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, + struct kbase_jd_atom *target_katom) +{ + struct kbase_device *kbdev; 
+ int js = target_katom->slot_nr; + int priority = target_katom->sched_priority; + int i; + bool stop_sent = false; + + KBASE_DEBUG_ASSERT(kctx != NULL); + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev != NULL); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) { + struct kbase_jd_atom *katom; + + katom = kbase_gpu_inspect(kbdev, js, i); + if (!katom) + continue; + + if (katom->kctx != kctx) + continue; + + if (katom->sched_priority > priority) { + if (!stop_sent) + KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE( + target_katom); + + kbase_job_slot_softstop(kbdev, js, katom); + stop_sent = true; + } + } +} + +void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx) +{ + struct kbase_device *kbdev = kctx->kbdev; + unsigned long timeout = msecs_to_jiffies(ZAP_TIMEOUT); + + timeout = wait_event_timeout(kctx->jctx.zero_jobs_wait, + kctx->jctx.job_nr == 0, timeout); + + if (timeout != 0) + timeout = wait_event_timeout( + kctx->jctx.sched_info.ctx.is_scheduled_wait, + !kbase_ctx_flag(kctx, KCTX_SCHEDULED), + timeout); + + /* Neither wait timed out; all done! */ + if (timeout != 0) + goto exit; + +#if KBASE_GPU_RESET_EN + if (kbase_prepare_to_reset_gpu(kbdev)) { + dev_err(kbdev->dev, + "Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n", + ZAP_TIMEOUT); + kbase_reset_gpu(kbdev); + } + + /* Wait for the reset to complete */ + wait_event(kbdev->hwaccess.backend.reset_wait, + atomic_read(&kbdev->hwaccess.backend.reset_gpu) + == KBASE_RESET_GPU_NOT_PENDING); +#else + dev_warn(kbdev->dev, + "Jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n", + ZAP_TIMEOUT); + +#endif +exit: + dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx); + + /* Ensure that the signallers of the waitqs have finished */ + mutex_lock(&kctx->jctx.lock); + mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + mutex_unlock(&kctx->jctx.lock); +} + +u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev) +{ + u32 flush_id = 0; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) { + mutex_lock(&kbdev->pm.lock); + if (kbdev->pm.backend.gpu_powered) + flush_id = kbase_reg_read(kbdev, + GPU_CONTROL_REG(LATEST_FLUSH), NULL); + mutex_unlock(&kbdev->pm.lock); + } + + return flush_id; +} + +int kbase_job_slot_init(struct kbase_device *kbdev) +{ +#if KBASE_GPU_RESET_EN + kbdev->hwaccess.backend.reset_workq = alloc_workqueue( + "Mali reset workqueue", 0, 1); + if (NULL == kbdev->hwaccess.backend.reset_workq) + return -EINVAL; + + INIT_WORK(&kbdev->hwaccess.backend.reset_work, + kbasep_reset_timeout_worker); + + hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + kbdev->hwaccess.backend.reset_timer.function = + kbasep_reset_timer_callback; +#endif + + return 0; +} +KBASE_EXPORT_TEST_API(kbase_job_slot_init); + +void kbase_job_slot_halt(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +void kbase_job_slot_term(struct kbase_device *kbdev) +{ +#if KBASE_GPU_RESET_EN + destroy_workqueue(kbdev->hwaccess.backend.reset_workq); +#endif +} +KBASE_EXPORT_TEST_API(kbase_job_slot_term); + +#if KBASE_GPU_RESET_EN +/** + * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot + * @kbdev: kbase device pointer + * @kctx: context to check against + * @js: slot to check + * @target_katom: An atom to check, or NULL if all 
atoms from @kctx on
+ * slot @js should be checked
+ *
+ * These checks are based upon parameters that would normally be passed to
+ * kbase_job_slot_hardstop().
+ *
+ * In the event of @target_katom being NULL, this will check the last jobs that
+ * are likely to be running on the slot to see if a) they belong to kctx, and
+ * so would be stopped, and b) whether they have AFBC
+ *
+ * In that case, it's guaranteed that a job currently executing on the HW with
+ * AFBC will be detected. However, this is a conservative check because it also
+ * detects jobs that have just completed too.
+ *
+ * Return: true when hard-stop _might_ stop an afbc atom, else false.
+ */
+static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
+		struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom)
+{
+	bool ret = false;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* When we have an atom the decision can be made straight away. */
+	if (target_katom)
+		return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
+
+	/* Otherwise, we must check the hardware to see if it has atoms from
+	 * this context with AFBC. */
+	for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+		struct kbase_jd_atom *katom;
+
+		katom = kbase_gpu_inspect(kbdev, js, i);
+		if (!katom)
+			continue;
+
+		/* Ignore atoms from other contexts, they won't be stopped when
+		 * we use this for checking if we should hard-stop them */
+		if (katom->kctx != kctx)
+			continue;
+
+		/* An atom on this slot and this context: check for AFBC */
+		if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+#endif /* KBASE_GPU_RESET_EN */
+
+/**
+ * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * @sw_flags: Flags to pass in about the soft-stop
+ *
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+	KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+	kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
+			JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
+/**
+ * kbase_job_slot_softstop - Soft-stop the specified job slot
+ * @kbdev: The kbase device
+ * @js: The job slot to soft-stop
+ * @target_katom: The job that should be soft-stopped (or NULL for any job)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom)
+{
+	kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
+}
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx: The kbase context that contains the job(s) that should
+ * be hard-stopped
+ * @js: The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ * jobs from the context)
+ * Context:
+ * The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool stopped;
+#if KBASE_GPU_RESET_EN
+	/* We make the check for AFBC before evicting/stopping atoms. Note
+	 * that no other thread can modify the slots whilst we have the
+	 * hwaccess_lock. */
+	int needs_workaround_for_afbc =
+		kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
+		&& kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
+				target_katom);
+#endif
+
+	stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
+			target_katom,
+			JS_COMMAND_HARD_STOP);
+#if KBASE_GPU_RESET_EN
+	if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
+			kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
+			needs_workaround_for_afbc)) {
+		/* MIDBASE-2916 if a fragment job with AFBC encoding is
+		 * hardstopped, ensure to do a soft reset also in order to
+		 * clear the GPU status.
+		 * Workaround for HW issue 8401 has an issue, so after
+		 * hard-stopping just reset the GPU. This will ensure that the
+		 * jobs leave the GPU. */
+		if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+			dev_err(kbdev->dev, "Issuing GPU soft-reset after hard stopping due to hardware issue");
+			kbase_reset_gpu_locked(kbdev);
+		}
+	}
+#endif
+}
+
+/**
+ * kbase_job_check_enter_disjoint - potentially enter disjoint mode
+ * @kbdev: kbase device
+ * @action: the event which has occurred
+ * @core_reqs: core requirements of the atom
+ * @target_katom: the atom which is being affected
+ *
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period
+ *
+ * @core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+{
+	u32 hw_action = action & JS_COMMAND_MASK;
+
+	/* For hard-stop, don't enter if hard-stop not allowed */
+	if (hw_action == JS_COMMAND_HARD_STOP &&
+			!kbasep_hard_stop_allowed(kbdev, core_reqs))
+		return;
+
+	/* For soft-stop, don't enter if soft-stop not allowed, or isn't
+	 * causing disjoint */
+	if (hw_action == JS_COMMAND_SOFT_STOP &&
+			!(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+			(action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+		return;
+
+	/* Nothing to do if already logged disjoint state on this atom */
+	if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+		return;
+
+	target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+	kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */ +void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, + struct kbase_jd_atom *target_katom) +{ + if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) { + target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT; + kbase_disjoint_state_down(kbdev); + } +} + + +#if KBASE_GPU_RESET_EN +static void kbase_debug_dump_registers(struct kbase_device *kbdev) +{ + int i; + + kbase_io_history_dump(kbdev); + + dev_err(kbdev->dev, "Register state:"); + dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x", + kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL)); + dev_err(kbdev->dev, " JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x", + kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL), + kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL)); + for (i = 0; i < 3; i++) { + dev_err(kbdev->dev, " JS%d_STATUS=0x%08x JS%d_HEAD_LO=0x%08x", + i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS), + NULL), + i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO), + NULL)); + } + dev_err(kbdev->dev, " MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x", + kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL)); + dev_err(kbdev->dev, " GPU_IRQ_MASK=0x%08x JOB_IRQ_MASK=0x%08x MMU_IRQ_MASK=0x%08x", + kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL), + kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL), + kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL)); + dev_err(kbdev->dev, " PWR_OVERRIDE0=0x%08x PWR_OVERRIDE1=0x%08x", + kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL)); + dev_err(kbdev->dev, " SHADER_CONFIG=0x%08x L2_MMU_CONFIG=0x%08x", + kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL)); + dev_err(kbdev->dev, " TILER_CONFIG=0x%08x JM_CONFIG=0x%08x", + kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG), NULL)); +} + +static void kbasep_reset_timeout_worker(struct work_struct *data) +{ + unsigned long flags; + struct kbase_device *kbdev; + ktime_t end_timestamp = ktime_get(); + struct kbasep_js_device_data *js_devdata; + bool try_schedule = false; + bool silent = false; + u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS; + + KBASE_DEBUG_ASSERT(data); + + kbdev = container_of(data, struct kbase_device, + hwaccess.backend.reset_work); + + KBASE_DEBUG_ASSERT(kbdev); + js_devdata = &kbdev->js_data; + + if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) == + KBASE_RESET_GPU_SILENT) + silent = true; + + KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0); + + /* Suspend vinstr. + * This call will block until vinstr is suspended. */ + kbase_vinstr_suspend(kbdev->vinstr_ctx); + + /* Make sure the timer has completed - this cannot be done from + * interrupt context, so this cannot be done within + * kbasep_try_reset_gpu_early. */ + hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer); + + if (kbase_pm_context_active_handle_suspend(kbdev, + KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { + /* This would re-activate the GPU. 
Since it's already idle, + * there's no need to reset it */ + atomic_set(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_NOT_PENDING); + kbase_disjoint_state_down(kbdev); + wake_up(&kbdev->hwaccess.backend.reset_wait); + kbase_vinstr_resume(kbdev->vinstr_ctx); + return; + } + + KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false); + + spin_lock_irqsave(&kbdev->hwcnt.lock, flags); + spin_lock(&kbdev->hwaccess_lock); + spin_lock(&kbdev->mmu_mask_change); + /* We're about to flush out the IRQs and their bottom half's */ + kbdev->irq_reset_flush = true; + + /* Disable IRQ to avoid IRQ handlers to kick in after releasing the + * spinlock; this also clears any outstanding interrupts */ + kbase_pm_disable_interrupts_nolock(kbdev); + + spin_unlock(&kbdev->mmu_mask_change); + spin_unlock(&kbdev->hwaccess_lock); + spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags); + + /* Ensure that any IRQ handlers have finished + * Must be done without any locks IRQ handlers will take */ + kbase_synchronize_irqs(kbdev); + + /* Flush out any in-flight work items */ + kbase_flush_mmu_wqs(kbdev); + + /* The flush has completed so reset the active indicator */ + kbdev->irq_reset_flush = false; + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) { + /* Ensure that L2 is not transitioning when we send the reset + * command */ + while (--max_loops && kbase_pm_get_trans_cores(kbdev, + KBASE_PM_CORE_L2)) + ; + + WARN(!max_loops, "L2 power transition timed out while trying to reset\n"); + } + + mutex_lock(&kbdev->pm.lock); + /* We hold the pm lock, so there ought to be a current policy */ + KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy); + + /* All slot have been soft-stopped and we've waited + * SOFT_STOP_RESET_TIMEOUT for the slots to clear, at this point we + * assume that anything that is still left on the GPU is stuck there and + * we'll kill it when we reset the GPU */ + + if (!silent) + dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)", + RESET_TIMEOUT); + + /* Output the state of some interesting registers to help in the + * debugging of GPU resets */ + if (!silent) + kbase_debug_dump_registers(kbdev); + + /* Complete any jobs that were still on the GPU */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbdev->protected_mode = false; + kbase_backend_reset(kbdev, &end_timestamp); + kbase_pm_metrics_update(kbdev, NULL); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + /* Reset the GPU */ + kbase_pm_init_hw(kbdev, 0); + + mutex_unlock(&kbdev->pm.lock); + + mutex_lock(&js_devdata->runpool_mutex); + + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_ctx_sched_restore_all_as(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + kbase_pm_enable_interrupts(kbdev); + + atomic_set(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_NOT_PENDING); + + kbase_disjoint_state_down(kbdev); + + wake_up(&kbdev->hwaccess.backend.reset_wait); + if (!silent) + dev_err(kbdev->dev, "Reset complete"); + + if (js_devdata->nr_contexts_pullable > 0 && !kbdev->poweroff_pending) + try_schedule = true; + + mutex_unlock(&js_devdata->runpool_mutex); + + mutex_lock(&kbdev->pm.lock); + + /* Find out what cores are required now */ + kbase_pm_update_cores_state(kbdev); + + /* Synchronously request and wait for those cores, because if + * instrumentation is enabled it would need them immediately. 
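
A hedged, self-contained outline of the ordering the reset worker above follows. The stub functions and their names are illustrative only; none of the real locking or kbase calls appear here.

#include <stdio.h>

static void quiesce_irqs(void)        { puts("1: mask IRQs, synchronize_irq(), flush bottom halves"); }
static void flush_deferred_work(void) { puts("2: flush MMU work queues"); }
static void fail_inflight_jobs(void)  { puts("3: complete/fail atoms still tracked as on the GPU"); }
static void reset_hardware(void)      { puts("4: soft-reset the GPU, restore address spaces"); }
static void resume_scheduling(void)   { puts("5: re-enable IRQs, wake reset waiters, kick the scheduler"); }

int main(void)
{
	quiesce_irqs();
	flush_deferred_work();
	fail_inflight_jobs();
	reset_hardware();
	resume_scheduling();
	return 0;
}
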
*/ + kbase_pm_check_transitions_sync(kbdev); + + mutex_unlock(&kbdev->pm.lock); + + /* Try submitting some jobs to restart processing */ + if (try_schedule) { + KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u, + 0); + kbase_js_sched_all(kbdev); + } + + /* Process any pending slot updates */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_backend_slot_update(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + kbase_pm_context_idle(kbdev); + + /* Release vinstr */ + kbase_vinstr_resume(kbdev->vinstr_ctx); + + KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0); +} + +static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer) +{ + struct kbase_device *kbdev = container_of(timer, struct kbase_device, + hwaccess.backend.reset_timer); + + KBASE_DEBUG_ASSERT(kbdev); + + /* Reset still pending? */ + if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) == + KBASE_RESET_GPU_COMMITTED) + queue_work(kbdev->hwaccess.backend.reset_workq, + &kbdev->hwaccess.backend.reset_work); + + return HRTIMER_NORESTART; +} + +/* + * If all jobs are evicted from the GPU then we can reset the GPU + * immediately instead of waiting for the timeout to elapse + */ + +static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev) +{ + int i; + int pending_jobs = 0; + + KBASE_DEBUG_ASSERT(kbdev); + + /* Count the number of jobs */ + for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) + pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i); + + if (pending_jobs > 0) { + /* There are still jobs on the GPU - wait */ + return; + } + + /* To prevent getting incorrect registers when dumping failed job, + * skip early reset. + */ + if (kbdev->job_fault_debug != false) + return; + + /* Check that the reset has been committed to (i.e. kbase_reset_gpu has + * been called), and that no other thread beat this thread to starting + * the reset */ + if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) != + KBASE_RESET_GPU_COMMITTED) { + /* Reset has already occurred */ + return; + } + + queue_work(kbdev->hwaccess.backend.reset_workq, + &kbdev->hwaccess.backend.reset_work); +} + +static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbasep_try_reset_gpu_early_locked(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +/** + * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU + * @kbdev: kbase device + * + * This function just soft-stops all the slots to ensure that as many jobs as + * possible are saved. + * + * Return: + * The function returns a boolean which should be interpreted as follows: + * true - Prepared for reset, kbase_reset_gpu_locked should be called. + * false - Another thread is performing a reset, kbase_reset_gpu should + * not be called. 
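
A hedged analogue of the reset state machine described above, using C11 atomics in place of the kernel's atomic_cmpxchg(); the constants and function names are illustrative only, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>

enum { RESET_NOT_PENDING, RESET_PREPARED, RESET_COMMITTED, RESET_HAPPENING };

static _Atomic int reset_state = RESET_NOT_PENDING;

static bool demo_prepare_to_reset(void)
{
	int expected = RESET_NOT_PENDING;

	/* Equivalent in spirit to atomic_cmpxchg(&reset_gpu, NOT_PENDING,
	 * PREPARED): only one caller can win this transition. */
	return atomic_compare_exchange_strong(&reset_state, &expected,
					      RESET_PREPARED);
}

static void demo_commit_reset(void)
{
	/* Caller must have won demo_prepare_to_reset() first. */
	atomic_store(&reset_state, RESET_COMMITTED);
}

Only the winner of the compare-exchange may later move the state to COMMITTED, which is what keeps concurrent reset requests from queuing the reset work twice.
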
+ */ +bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev) +{ + int i; + + KBASE_DEBUG_ASSERT(kbdev); + + if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_NOT_PENDING, + KBASE_RESET_GPU_PREPARED) != + KBASE_RESET_GPU_NOT_PENDING) { + /* Some other thread is already resetting the GPU */ + return false; + } + + kbase_disjoint_state_up(kbdev); + + for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) + kbase_job_slot_softstop(kbdev, i, NULL); + + return true; +} + +bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + ret = kbase_prepare_to_reset_gpu_locked(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return ret; +} +KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu); + +/* + * This function should be called after kbase_prepare_to_reset_gpu if it + * returns true. It should never be called without a corresponding call to + * kbase_prepare_to_reset_gpu. + * + * After this function is called (or not called if kbase_prepare_to_reset_gpu + * returned false), the caller should wait for + * kbdev->hwaccess.backend.reset_waitq to be signalled to know when the reset + * has completed. + */ +void kbase_reset_gpu(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev); + + /* Note this is an assert/atomic_set because it is a software issue for + * a race to be occuring here */ + KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) == + KBASE_RESET_GPU_PREPARED); + atomic_set(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_COMMITTED); + + dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n", + kbdev->reset_timeout_ms); + + hrtimer_start(&kbdev->hwaccess.backend.reset_timer, + HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms), + HRTIMER_MODE_REL); + + /* Try resetting early */ + kbasep_try_reset_gpu_early(kbdev); +} +KBASE_EXPORT_TEST_API(kbase_reset_gpu); + +void kbase_reset_gpu_locked(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev); + + /* Note this is an assert/atomic_set because it is a software issue for + * a race to be occuring here */ + KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) == + KBASE_RESET_GPU_PREPARED); + atomic_set(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_COMMITTED); + + dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (upto %d ms) for all jobs to complete soft-stop\n", + kbdev->reset_timeout_ms); + hrtimer_start(&kbdev->hwaccess.backend.reset_timer, + HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms), + HRTIMER_MODE_REL); + + /* Try resetting early */ + kbasep_try_reset_gpu_early_locked(kbdev); +} + +void kbase_reset_gpu_silent(struct kbase_device *kbdev) +{ + if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu, + KBASE_RESET_GPU_NOT_PENDING, + KBASE_RESET_GPU_SILENT) != + KBASE_RESET_GPU_NOT_PENDING) { + /* Some other thread is already resetting the GPU */ + return; + } + + kbase_disjoint_state_up(kbdev); + + queue_work(kbdev->hwaccess.backend.reset_workq, + &kbdev->hwaccess.backend.reset_work); +} + +bool kbase_reset_gpu_active(struct kbase_device *kbdev) +{ + if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) == + KBASE_RESET_GPU_NOT_PENDING) + return false; + + return true; +} +#endif /* KBASE_GPU_RESET_EN */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h new file mode 100644 index 00000000000000..1f382b3c1af41f 
--- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h @@ -0,0 +1,164 @@ +/* + * + * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Job Manager backend-specific low-level APIs. + */ + +#ifndef _KBASE_JM_HWACCESS_H_ +#define _KBASE_JM_HWACCESS_H_ + +#include +#include +#include + +#include + +/** + * kbase_job_submit_nolock() - Submit a job to a certain job-slot + * @kbdev: Device pointer + * @katom: Atom to submit + * @js: Job slot to submit on + * + * The caller must check kbasep_jm_is_submit_slots_free() != false before + * calling this. + * + * The following locking conditions are made on the caller: + * - it must hold the hwaccess_lock + */ +void kbase_job_submit_nolock(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, int js); + +/** + * kbase_job_done_slot() - Complete the head job on a particular job-slot + * @kbdev: Device pointer + * @s: Job slot + * @completion_code: Completion code of job reported by GPU + * @job_tail: Job tail address reported by GPU + * @end_timestamp: Timestamp of job completion + */ +void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code, + u64 job_tail, ktime_t *end_timestamp); + +#ifdef CONFIG_GPU_TRACEPOINTS +static inline char *kbasep_make_job_slot_string(int js, char *js_string, + size_t js_size) +{ + snprintf(js_string, js_size, "job_slot_%i", js); + return js_string; +} +#endif + +/** + * kbase_job_hw_submit() - Submit a job to the GPU + * @kbdev: Device pointer + * @katom: Atom to submit + * @js: Job slot to submit on + * + * The caller must check kbasep_jm_is_submit_slots_free() != false before + * calling this. + * + * The following locking conditions are made on the caller: + * - it must hold the hwaccess_lock + */ +void kbase_job_hw_submit(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + int js); + +/** + * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop + * on the specified atom + * @kbdev: Device pointer + * @js: Job slot to stop on + * @action: The action to perform, either JSn_COMMAND_HARD_STOP or + * JSn_COMMAND_SOFT_STOP + * @core_reqs: Core requirements of atom to stop + * @target_katom: Atom to stop + * + * The following locking conditions are made on the caller: + * - it must hold the hwaccess_lock + */ +void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev, + int js, + u32 action, + base_jd_core_req core_reqs, + struct kbase_jd_atom *target_katom); + +/** + * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job + * slot belonging to a given context. + * @kbdev: Device pointer + * @kctx: Context pointer. May be NULL + * @katom: Specific atom to stop. May be NULL + * @js: Job slot to hard stop + * @action: The action to perform, either JSn_COMMAND_HARD_STOP or + * JSn_COMMAND_SOFT_STOP + * + * If no context is provided then all jobs on the slot will be soft or hard + * stopped. + * + * If a katom is provided then only that specific atom will be stopped. In this + * case the kctx parameter is ignored. 
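
For reference, a hedged sketch of the action/flag packing implied by the @action parameter documented above and by kbase_job_slot_softstop_swflags(): software flags must live outside the hardware command mask, so one word carries both and the hardware part is recovered by masking. The constants below are stand-ins, not the real register encodings.

#include <assert.h>
#include <stdint.h>

#define DEMO_JS_COMMAND_MASK	0x07u		/* illustrative only */
#define DEMO_SOFT_STOP		0x01u		/* illustrative only */
#define DEMO_SW_CAUSES_DISJOINT	(1u << 3)	/* outside the command mask */

static uint32_t demo_pack_action(uint32_t sw_flags)
{
	assert(!(sw_flags & DEMO_JS_COMMAND_MASK));
	return DEMO_SOFT_STOP | sw_flags;
}

static uint32_t demo_hw_action(uint32_t action)
{
	return action & DEMO_JS_COMMAND_MASK;
}
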
+ * + * Jobs that are on the slot but are not yet on the GPU will be unpulled and + * returned to the job scheduler. + * + * Return: true if an atom was stopped, false otherwise + */ +bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js, + struct kbase_jd_atom *katom, + u32 action); + +/** + * kbase_job_slot_init - Initialise job slot framework + * @kbdev: Device pointer + * + * Called on driver initialisation + * + * Return: 0 on success + */ +int kbase_job_slot_init(struct kbase_device *kbdev); + +/** + * kbase_job_slot_halt - Halt the job slot framework + * @kbdev: Device pointer + * + * Should prevent any further job slot processing + */ +void kbase_job_slot_halt(struct kbase_device *kbdev); + +/** + * kbase_job_slot_term - Terminate job slot framework + * @kbdev: Device pointer + * + * Called on driver termination + */ +void kbase_job_slot_term(struct kbase_device *kbdev); + +/** + * kbase_gpu_cacheclean - Cause a GPU cache clean & flush + * @kbdev: Device pointer + * + * Caller must not be in IRQ context + */ +void kbase_gpu_cacheclean(struct kbase_device *kbdev); + +#endif /* _KBASE_JM_HWACCESS_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c new file mode 100644 index 00000000000000..a41e7b5b7afbff --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c @@ -0,0 +1,1947 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Register-based HW access backend specific APIs + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Return whether the specified ringbuffer is empty. HW access lock must be + * held */ +#define SLOT_RB_EMPTY(rb) (rb->write_idx == rb->read_idx) +/* Return number of atoms currently in the specified ringbuffer. HW access lock + * must be held */ +#define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx) + +static void kbase_gpu_release_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + ktime_t *end_timestamp); + +/** + * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer + * @kbdev: Device pointer + * @katom: Atom to enqueue + * + * Context: Caller must hold the HW access lock + */ +static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr]; + + WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom; + rb->write_idx++; + + katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED; +} + +/** + * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once + * it has been completed + * @kbdev: Device pointer + * @js: Job slot to remove atom from + * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in + * which case current time will be used. 
+ * + * Context: Caller must hold the HW access lock + * + * Return: Atom removed from ringbuffer + */ +static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev, + int js, + ktime_t *end_timestamp) +{ + struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; + struct kbase_jd_atom *katom; + + if (SLOT_RB_EMPTY(rb)) { + WARN(1, "GPU ringbuffer unexpectedly empty\n"); + return NULL; + } + + lockdep_assert_held(&kbdev->hwaccess_lock); + + katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom; + + kbase_gpu_release_atom(kbdev, katom, end_timestamp); + + rb->read_idx++; + + katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB; + + kbase_js_debug_log_current_affinities(kbdev); + + return katom; +} + +struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js, + int idx) +{ + struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if ((SLOT_RB_ENTRIES(rb) - 1) < idx) + return NULL; /* idx out of range */ + + return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom; +} + +struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev, + int js) +{ + return kbase_gpu_inspect(kbdev, js, 0); +} + +struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev, + int js) +{ + struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js]; + + if (SLOT_RB_EMPTY(rb)) + return NULL; + + return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom; +} + +/** + * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently + * on the GPU + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return: true if there are atoms on the GPU for slot js, + * false otherwise + */ +static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js) +{ + int i; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (i = 0; i < SLOT_RB_SIZE; i++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); + + if (!katom) + return false; + if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED || + katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY) + return true; + } + + return false; +} + +/** + * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms + * currently on the GPU + * @kbdev: Device pointer + * + * Return: true if there are any atoms on the GPU, false otherwise + */ +static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev) +{ + int js; + int i; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + for (i = 0; i < SLOT_RB_SIZE; i++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); + + if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED) + return true; + } + } + return false; +} + +int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js) +{ + int nr = 0; + int i; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (i = 0; i < SLOT_RB_SIZE; i++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); + + if (katom && (katom->gpu_rb_state == + KBASE_ATOM_GPU_RB_SUBMITTED)) + nr++; + } + + return nr; +} + +int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js) +{ + int nr = 0; + int i; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (i = 0; i < SLOT_RB_SIZE; i++) { + if (kbase_gpu_inspect(kbdev, js, i)) + nr++; + } + + return nr; +} + +static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js, + enum kbase_atom_gpu_rb_state min_rb_state) +{ + int nr = 0; + int i; + + 
lockdep_assert_held(&kbdev->hwaccess_lock); + + for (i = 0; i < SLOT_RB_SIZE; i++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i); + + if (katom && (katom->gpu_rb_state >= min_rb_state)) + nr++; + } + + return nr; +} + +/** + * check_secure_atom - Check if the given atom is in the given secure state and + * has a ringbuffer state of at least + * KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION + * @katom: Atom pointer + * @secure: Desired secure state + * + * Return: true if atom is in the given state, false otherwise + */ +static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure) +{ + if (katom->gpu_rb_state >= + KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION && + ((kbase_jd_katom_is_protected(katom) && secure) || + (!kbase_jd_katom_is_protected(katom) && !secure))) + return true; + + return false; +} + +/** + * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given + * secure state in the ringbuffers of at least + * state + * KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE + * @kbdev: Device pointer + * @secure: Desired secure state + * + * Return: true if any atoms are in the given state, false otherwise + */ +static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev, + bool secure) +{ + int js, i; + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + for (i = 0; i < SLOT_RB_SIZE; i++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, + js, i); + + if (katom) { + if (check_secure_atom(katom, secure)) + return true; + } + } + } + + return false; +} + +int kbase_backend_slot_free(struct kbase_device *kbdev, int js) +{ + if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) != + KBASE_RESET_GPU_NOT_PENDING) { + /* The GPU is being reset - so prevent submission */ + return 0; + } + + return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js); +} + + +static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev, + int js, + struct kbase_jd_atom *katom) +{ + /* The most recently checked affinity. Having this at this scope allows + * us to guarantee that we've checked the affinity in this function + * call. + */ + u64 recently_chosen_affinity = 0; + bool chosen_affinity = false; + bool retry; + + do { + retry = false; + + /* NOTE: The following uses a number of FALLTHROUGHs to optimize + * the calls to this function. 
Ending of the function is + * indicated by BREAK OUT */ + switch (katom->coreref_state) { + /* State when job is first attempted to be run */ + case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: + KBASE_DEBUG_ASSERT(katom->affinity == 0); + + /* Compute affinity */ + if (false == kbase_js_choose_affinity( + &recently_chosen_affinity, kbdev, katom, + js)) { + /* No cores are currently available */ + /* *** BREAK OUT: No state transition *** */ + break; + } + + chosen_affinity = true; + + /* Request the cores */ + kbase_pm_request_cores(kbdev, + katom->core_req & BASE_JD_REQ_T, + recently_chosen_affinity); + + katom->affinity = recently_chosen_affinity; + + /* Proceed to next state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: + { + enum kbase_pm_cores_ready cores_ready; + + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + + cores_ready = kbase_pm_register_inuse_cores( + kbdev, + katom->core_req & BASE_JD_REQ_T, + katom->affinity); + if (cores_ready == KBASE_NEW_AFFINITY) { + /* Affinity no longer valid - return to + * previous state */ + kbasep_js_job_check_deref_cores(kbdev, + katom); + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_REGISTER_INUSE_FAILED, + katom->kctx, katom, + katom->jc, js, + (u32) katom->affinity); + /* *** BREAK OUT: Return to previous + * state, retry *** */ + retry = true; + break; + } + if (cores_ready == KBASE_CORES_NOT_READY) { + /* Stay in this state and return, to + * retry at this state later */ + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_REGISTER_INUSE_FAILED, + katom->kctx, katom, + katom->jc, js, + (u32) katom->affinity); + /* *** BREAK OUT: No state transition + * *** */ + break; + } + /* Proceed to next state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; + } + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + + /* Optimize out choosing the affinity twice in the same + * function call */ + if (chosen_affinity == false) { + /* See if the affinity changed since a previous + * call. 
*/ + if (false == kbase_js_choose_affinity( + &recently_chosen_affinity, + kbdev, katom, js)) { + /* No cores are currently available */ + kbasep_js_job_check_deref_cores(kbdev, + katom); + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_REQUEST_ON_RECHECK_FAILED, + katom->kctx, katom, + katom->jc, js, + (u32) recently_chosen_affinity); + /* *** BREAK OUT: Transition to lower + * state *** */ + break; + } + chosen_affinity = true; + } + + /* Now see if this requires a different set of cores */ + if (recently_chosen_affinity != katom->affinity) { + enum kbase_pm_cores_ready cores_ready; + + kbase_pm_request_cores(kbdev, + katom->core_req & BASE_JD_REQ_T, + recently_chosen_affinity); + + /* Register new cores whilst we still hold the + * old ones, to minimize power transitions */ + cores_ready = + kbase_pm_register_inuse_cores(kbdev, + katom->core_req & BASE_JD_REQ_T, + recently_chosen_affinity); + kbasep_js_job_check_deref_cores(kbdev, katom); + + /* Fixup the state that was reduced by + * deref_cores: */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; + katom->affinity = recently_chosen_affinity; + if (cores_ready == KBASE_NEW_AFFINITY) { + /* Affinity no longer valid - return to + * previous state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; + + kbasep_js_job_check_deref_cores(kbdev, + katom); + + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_REGISTER_INUSE_FAILED, + katom->kctx, katom, + katom->jc, js, + (u32) katom->affinity); + /* *** BREAK OUT: Return to previous + * state, retry *** */ + retry = true; + break; + } + /* Now might be waiting for powerup again, with + * a new affinity */ + if (cores_ready == KBASE_CORES_NOT_READY) { + /* Return to previous state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES; + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_REGISTER_ON_RECHECK_FAILED, + katom->kctx, katom, + katom->jc, js, + (u32) katom->affinity); + /* *** BREAK OUT: Transition to lower + * state *** */ + break; + } + } + /* Proceed to next state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS: + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + KBASE_DEBUG_ASSERT(katom->affinity == + recently_chosen_affinity); + + /* Note: this is where the caller must've taken the + * hwaccess_lock */ + + /* Check for affinity violations - if there are any, + * then we just ask the caller to requeue and try again + * later */ + if (kbase_js_affinity_would_violate(kbdev, js, + katom->affinity) != false) { + /* Return to previous state */ + katom->coreref_state = + KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY; + /* *** BREAK OUT: Transition to lower state *** + */ + KBASE_TRACE_ADD_SLOT_INFO(kbdev, + JS_CORE_REF_AFFINITY_WOULD_VIOLATE, + katom->kctx, katom, katom->jc, js, + (u32) katom->affinity); + break; + } + + /* No affinity violations would result, so the cores are + * ready */ + katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY; + /* *** BREAK OUT: Cores Ready *** */ + break; + + default: + KBASE_DEBUG_ASSERT_MSG(false, + "Unhandled kbase_atom_coreref_state %d", + katom->coreref_state); + break; + } + } while (retry != false); + + return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY); +} + +static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + 
KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(katom != NULL); + + switch (katom->coreref_state) { + case KBASE_ATOM_COREREF_STATE_READY: + /* State where atom was submitted to the HW - just proceed to + * power-down */ + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + + /* *** FALLTHROUGH *** */ + + case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: + /* State where cores were registered */ + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T, + katom->affinity); + + break; + + case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: + /* State where cores were requested, but not registered */ + KBASE_DEBUG_ASSERT(katom->affinity != 0 || + (katom->core_req & BASE_JD_REQ_T)); + kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T, + katom->affinity); + break; + + case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: + /* Initial state - nothing required */ + KBASE_DEBUG_ASSERT(katom->affinity == 0); + break; + + default: + KBASE_DEBUG_ASSERT_MSG(false, + "Unhandled coreref_state: %d", + katom->coreref_state); + break; + } + + katom->affinity = 0; + katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; +} + +static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev, + base_jd_core_req core_req, u64 affinity, + enum kbase_atom_coreref_state coreref_state) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + switch (coreref_state) { + case KBASE_ATOM_COREREF_STATE_READY: + /* State where atom was submitted to the HW - just proceed to + * power-down */ + KBASE_DEBUG_ASSERT(affinity != 0 || + (core_req & BASE_JD_REQ_T)); + + /* *** FALLTHROUGH *** */ + + case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY: + /* State where cores were registered */ + KBASE_DEBUG_ASSERT(affinity != 0 || + (core_req & BASE_JD_REQ_T)); + kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T, + affinity); + + break; + + case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: + /* State where cores were requested, but not registered */ + KBASE_DEBUG_ASSERT(affinity != 0 || + (core_req & BASE_JD_REQ_T)); + kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T, + affinity); + break; + + case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: + /* Initial state - nothing required */ + KBASE_DEBUG_ASSERT(affinity == 0); + break; + + default: + KBASE_DEBUG_ASSERT_MSG(false, + "Unhandled coreref_state: %d", + coreref_state); + break; + } +} + +static void kbase_gpu_release_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + ktime_t *end_timestamp) +{ + struct kbase_context *kctx = katom->kctx; + + switch (katom->gpu_rb_state) { + case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: + /* Should be impossible */ + WARN(1, "Attempting to release atom not in ringbuffer\n"); + break; + + case KBASE_ATOM_GPU_RB_SUBMITTED: + /* Inform power management at start/finish of atom so it can + * update its GPU utilisation metrics. Mark atom as not + * submitted beforehand. 
*/ + katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY; + kbase_pm_metrics_update(kbdev, end_timestamp); + + if (katom->core_req & BASE_JD_REQ_PERMON) + kbase_pm_release_gpu_cycle_counter_nolock(kbdev); + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom, + &kbdev->gpu_props.props.raw_props.js_features + [katom->slot_nr]); + KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]); + KBASE_TLSTREAM_TL_NRET_CTX_LPU(kctx, + &kbdev->gpu_props.props.raw_props.js_features + [katom->slot_nr]); + + case KBASE_ATOM_GPU_RB_READY: + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_AFFINITY: + kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr, + katom->affinity); + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: + break; + + case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION: + if (katom->protected_state.enter != + KBASE_ATOM_ENTER_PROTECTED_CHECK || + katom->protected_state.exit != + KBASE_ATOM_EXIT_PROTECTED_CHECK) + kbdev->protected_mode_transition = false; + + if (kbase_jd_katom_is_protected(katom) && + (katom->protected_state.enter == + KBASE_ATOM_ENTER_PROTECTED_IDLE_L2)) { + kbase_vinstr_resume(kbdev->vinstr_ctx); + + /* Go back to configured model for IPA */ + kbase_ipa_model_use_configured_locked(kbdev); + } + + + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV: + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_BLOCKED: + /* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */ + + case KBASE_ATOM_GPU_RB_RETURN_TO_JS: + break; + } + + katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED; + katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK; +} + +static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + kbase_gpu_release_atom(kbdev, katom, NULL); + katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS; +} + +static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + bool slot_busy[3]; + + if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) + return true; + slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0, + KBASE_ATOM_GPU_RB_WAITING_AFFINITY); + slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1, + KBASE_ATOM_GPU_RB_WAITING_AFFINITY); + slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2, + KBASE_ATOM_GPU_RB_WAITING_AFFINITY); + + if ((js == 2 && !(slot_busy[0] || slot_busy[1])) || + (js != 2 && !slot_busy[2])) + return true; + + /* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */ + if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) || + kbase_gpu_atoms_submitted(kbdev, 1) || + backend->rmu_workaround_flag)) + return false; + + /* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */ + if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) || + !backend->rmu_workaround_flag)) + return false; + + backend->rmu_workaround_flag = !backend->rmu_workaround_flag; + + return true; +} + +/** + * other_slots_busy - Determine if any job slots other than @js are currently + * running atoms + * @kbdev: Device pointer + * @js: Job slot + * + * Return: true if any slots other than @js are busy, false otherwise + */ +static inline bool other_slots_busy(struct kbase_device *kbdev, int js) +{ + int slot; + + for (slot = 0; slot < kbdev->gpu_props.num_job_slots; slot++) { + if (slot == js) + 
continue; + + if (kbase_gpu_nr_atoms_on_slot_min(kbdev, slot, + KBASE_ATOM_GPU_RB_SUBMITTED)) + return true; + } + + return false; +} + +static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev) +{ + return kbdev->protected_mode; +} + +static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev) +{ + int err = -EINVAL; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ONCE(!kbdev->protected_ops, + "Cannot enter protected mode: protected callbacks not specified.\n"); + + /* + * When entering into protected mode, we must ensure that the + * GPU is not operating in coherent mode as well. This is to + * ensure that no protected memory can be leaked. + */ + if (kbdev->system_coherency == COHERENCY_ACE) + kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE); + + if (kbdev->protected_ops) { + /* Switch GPU to protected mode */ + err = kbdev->protected_ops->protected_mode_enable( + kbdev->protected_dev); + + if (err) + dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n", + err); + else + kbdev->protected_mode = true; + } + + return err; +} + +static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ONCE(!kbdev->protected_ops, + "Cannot exit protected mode: protected callbacks not specified.\n"); + + if (!kbdev->protected_ops) + return -EINVAL; + + /* The protected mode disable callback will be called as part of reset + */ + kbase_reset_gpu_silent(kbdev); + + return 0; +} + +static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev, + struct kbase_jd_atom **katom, int idx, int js) +{ + int err = 0; + + switch (katom[idx]->protected_state.enter) { + case KBASE_ATOM_ENTER_PROTECTED_CHECK: + KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev); + /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV + * should ensure that we are not already transitiong, and that + * there are no atoms currently on the GPU. */ + WARN_ON(kbdev->protected_mode_transition); + WARN_ON(kbase_gpu_atoms_submitted_any(kbdev)); + + kbdev->protected_mode_transition = true; + katom[idx]->protected_state.enter = + KBASE_ATOM_ENTER_PROTECTED_VINSTR; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_ENTER_PROTECTED_VINSTR: + if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) { + /* + * We can't switch now because + * the vinstr core state switch + * is not done yet. + */ + return -EAGAIN; + } + + /* Use generic model for IPA in protected mode */ + kbase_ipa_model_use_fallback_locked(kbdev); + + /* Once reaching this point GPU must be + * switched to protected mode or vinstr + * re-enabled. */ + + /* + * Not in correct mode, begin protected mode switch. + * Entering protected mode requires us to power down the L2, + * and drop out of fully coherent mode. + */ + katom[idx]->protected_state.enter = + KBASE_ATOM_ENTER_PROTECTED_IDLE_L2; + + kbase_pm_update_cores_state_nolock(kbdev); + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2: + /* Avoid unnecessary waiting on non-ACE platforms. */ + if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) { + if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) || + kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) { + /* + * The L2 is still powered, wait for all the users to + * finish with it before doing the actual reset. 
+ */ + return -EAGAIN; + } + } + + katom[idx]->protected_state.enter = + KBASE_ATOM_ENTER_PROTECTED_FINISHED; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_ENTER_PROTECTED_FINISHED: + + /* No jobs running, so we can switch GPU mode right now. */ + err = kbase_gpu_protected_mode_enter(kbdev); + + /* + * Regardless of result, we are no longer transitioning + * the GPU. + */ + kbdev->protected_mode_transition = false; + KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev); + if (err) { + /* + * Failed to switch into protected mode, resume + * vinstr core and fail atom. + */ + kbase_vinstr_resume(kbdev->vinstr_ctx); + katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID; + kbase_gpu_mark_atom_for_return(kbdev, katom[idx]); + /* Only return if head atom or previous atom + * already removed - as atoms must be returned + * in order. */ + if (idx == 0 || katom[0]->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { + kbase_gpu_dequeue_atom(kbdev, js, NULL); + kbase_jm_return_atom_to_js(kbdev, katom[idx]); + } + + /* Go back to configured model for IPA */ + kbase_ipa_model_use_configured_locked(kbdev); + + return -EINVAL; + } + + /* Protected mode sanity checks. */ + KBASE_DEBUG_ASSERT_MSG( + kbase_jd_katom_is_protected(katom[idx]) == + kbase_gpu_in_protected_mode(kbdev), + "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)", + kbase_jd_katom_is_protected(katom[idx]), + kbase_gpu_in_protected_mode(kbdev)); + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_READY; + } + + return 0; +} + +static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev, + struct kbase_jd_atom **katom, int idx, int js) +{ + int err = 0; + + + switch (katom[idx]->protected_state.exit) { + case KBASE_ATOM_EXIT_PROTECTED_CHECK: + KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev); + /* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV + * should ensure that we are not already transitiong, and that + * there are no atoms currently on the GPU. */ + WARN_ON(kbdev->protected_mode_transition); + WARN_ON(kbase_gpu_atoms_submitted_any(kbdev)); + + /* + * Exiting protected mode requires a reset, but first the L2 + * needs to be powered down to ensure it's not active when the + * reset is issued. + */ + katom[idx]->protected_state.exit = + KBASE_ATOM_EXIT_PROTECTED_IDLE_L2; + + kbdev->protected_mode_transition = true; + kbase_pm_update_cores_state_nolock(kbdev); + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2: + if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) || + kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) { + /* + * The L2 is still powered, wait for all the users to + * finish with it before doing the actual reset. 
+ */ + return -EAGAIN; + } + katom[idx]->protected_state.exit = + KBASE_ATOM_EXIT_PROTECTED_RESET; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_EXIT_PROTECTED_RESET: + /* Issue the reset to the GPU */ + err = kbase_gpu_protected_mode_reset(kbdev); + + if (err) { + kbdev->protected_mode_transition = false; + + /* Failed to exit protected mode, fail atom */ + katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID; + kbase_gpu_mark_atom_for_return(kbdev, katom[idx]); + /* Only return if head atom or previous atom + * already removed - as atoms must be returned + * in order */ + if (idx == 0 || katom[0]->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { + kbase_gpu_dequeue_atom(kbdev, js, NULL); + kbase_jm_return_atom_to_js(kbdev, katom[idx]); + } + + kbase_vinstr_resume(kbdev->vinstr_ctx); + + /* Use generic model for IPA in protected mode */ + kbase_ipa_model_use_fallback_locked(kbdev); + + return -EINVAL; + } + + katom[idx]->protected_state.exit = + KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT: + /* A GPU reset is issued when exiting protected mode. Once the + * reset is done all atoms' state will also be reset. For this + * reason, if the atom is still in this state we can safely + * say that the reset has not completed i.e., we have not + * finished exiting protected mode yet. + */ + return -EAGAIN; + } + + return 0; +} + +void kbase_backend_slot_update(struct kbase_device *kbdev) +{ + int js; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + struct kbase_jd_atom *katom[2]; + int idx; + + katom[0] = kbase_gpu_inspect(kbdev, js, 0); + katom[1] = kbase_gpu_inspect(kbdev, js, 1); + WARN_ON(katom[1] && !katom[0]); + + for (idx = 0; idx < SLOT_RB_SIZE; idx++) { + bool cores_ready; + int ret; + + if (!katom[idx]) + continue; + + switch (katom[idx]->gpu_rb_state) { + case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: + /* Should be impossible */ + WARN(1, "Attempting to update atom not in ringbuffer\n"); + break; + + case KBASE_ATOM_GPU_RB_WAITING_BLOCKED: + if (katom[idx]->atom_flags & + KBASE_KATOM_FLAG_X_DEP_BLOCKED) + break; + + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV: + if (kbase_gpu_check_secure_atoms(kbdev, + !kbase_jd_katom_is_protected( + katom[idx]))) + break; + + if ((idx == 1) && (kbase_jd_katom_is_protected( + katom[0]) != + kbase_jd_katom_is_protected( + katom[1]))) + break; + + if (kbdev->protected_mode_transition) + break; + + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION: + + /* + * Exiting protected mode must be done before + * the references on the cores are taken as + * a power down the L2 is required which + * can't happen after the references for this + * atom are taken. + */ + + if (!kbase_gpu_in_protected_mode(kbdev) && + kbase_jd_katom_is_protected(katom[idx])) { + /* Atom needs to transition into protected mode. */ + ret = kbase_jm_enter_protected_mode(kbdev, + katom, idx, js); + if (ret) + break; + } else if (kbase_gpu_in_protected_mode(kbdev) && + !kbase_jd_katom_is_protected(katom[idx])) { + /* Atom needs to transition out of protected mode. 
*/ + ret = kbase_jm_exit_protected_mode(kbdev, + katom, idx, js); + if (ret) + break; + } + katom[idx]->protected_state.exit = + KBASE_ATOM_EXIT_PROTECTED_CHECK; + + /* Atom needs no protected mode transition. */ + + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: + if (katom[idx]->will_fail_event_code) { + kbase_gpu_mark_atom_for_return(kbdev, + katom[idx]); + /* Set EVENT_DONE so this atom will be + completed, not unpulled. */ + katom[idx]->event_code = + BASE_JD_EVENT_DONE; + /* Only return if head atom or previous + * atom already removed - as atoms must + * be returned in order. */ + if (idx == 0 || katom[0]->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { + kbase_gpu_dequeue_atom(kbdev, js, NULL); + kbase_jm_return_atom_to_js(kbdev, katom[idx]); + } + break; + } + + cores_ready = + kbasep_js_job_check_ref_cores(kbdev, js, + katom[idx]); + + if (katom[idx]->event_code == + BASE_JD_EVENT_PM_EVENT) { + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_RETURN_TO_JS; + break; + } + + if (!cores_ready) + break; + + kbase_js_affinity_retain_slot_cores(kbdev, js, + katom[idx]->affinity); + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_WAITING_AFFINITY; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_WAITING_AFFINITY: + if (!kbase_gpu_rmu_workaround(kbdev, js)) + break; + + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_READY; + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_READY: + + if (idx == 1) { + /* Only submit if head atom or previous + * atom already submitted */ + if ((katom[0]->gpu_rb_state != + KBASE_ATOM_GPU_RB_SUBMITTED && + katom[0]->gpu_rb_state != + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB)) + break; + + /* If intra-slot serialization in use + * then don't submit atom to NEXT slot + */ + if (kbdev->serialize_jobs & + KBASE_SERIALIZE_INTRA_SLOT) + break; + } + + /* If inter-slot serialization in use then don't + * submit atom if any other slots are in use */ + if ((kbdev->serialize_jobs & + KBASE_SERIALIZE_INTER_SLOT) && + other_slots_busy(kbdev, js)) + break; + + if ((kbdev->serialize_jobs & + KBASE_SERIALIZE_RESET) && + kbase_reset_gpu_active(kbdev)) + break; + + /* Check if this job needs the cycle counter + * enabled before submission */ + if (katom[idx]->core_req & BASE_JD_REQ_PERMON) + kbase_pm_request_gpu_cycle_counter_l2_is_on( + kbdev); + + kbase_job_hw_submit(kbdev, katom[idx], js); + katom[idx]->gpu_rb_state = + KBASE_ATOM_GPU_RB_SUBMITTED; + + /* Inform power management at start/finish of + * atom so it can update its GPU utilisation + * metrics. 
*/ + kbase_pm_metrics_update(kbdev, + &katom[idx]->start_timestamp); + + /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */ + + case KBASE_ATOM_GPU_RB_SUBMITTED: + /* Atom submitted to HW, nothing else to do */ + break; + + case KBASE_ATOM_GPU_RB_RETURN_TO_JS: + /* Only return if head atom or previous atom + * already removed - as atoms must be returned + * in order */ + if (idx == 0 || katom[0]->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { + kbase_gpu_dequeue_atom(kbdev, js, NULL); + kbase_jm_return_atom_to_js(kbdev, + katom[idx]); + } + break; + } + } + } + + /* Warn if PRLAM-8987 affinity restrictions are violated */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) + WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) || + kbase_gpu_atoms_submitted(kbdev, 1)) && + kbase_gpu_atoms_submitted(kbdev, 2)); +} + + +void kbase_backend_run_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + kbase_gpu_enqueue_atom(kbdev, katom); + kbase_backend_slot_update(kbdev); +} + +#define HAS_DEP(katom) (katom->pre_dep || katom->atom_flags & \ + (KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER)) + +bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js) +{ + struct kbase_jd_atom *katom; + struct kbase_jd_atom *next_katom; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + katom = kbase_gpu_inspect(kbdev, js, 0); + next_katom = kbase_gpu_inspect(kbdev, js, 1); + + if (next_katom && katom->kctx == next_katom->kctx && + next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED && + HAS_DEP(next_katom) && + (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL) + != 0 || + kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL) + != 0)) { + kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), + JS_COMMAND_NOP, NULL); + next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY; + + KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom, + &kbdev->gpu_props.props.raw_props.js_features + [katom->slot_nr]); + KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as + [katom->kctx->as_nr]); + KBASE_TLSTREAM_TL_NRET_CTX_LPU(katom->kctx, + &kbdev->gpu_props.props.raw_props.js_features + [katom->slot_nr]); + + return true; + } + + return false; +} + +void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js, + u32 completion_code, + u64 job_tail, + ktime_t *end_timestamp) +{ + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0); + struct kbase_context *kctx = katom->kctx; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* + * When a hard-stop is followed close after a soft-stop, the completion + * code may be set to STOPPED, even though the job is terminated + */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) { + if (completion_code == BASE_JD_EVENT_STOPPED && + (katom->atom_flags & + KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) { + completion_code = BASE_JD_EVENT_TERMINATED; + } + } + + if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) || (katom->core_req & + BASE_JD_REQ_SKIP_CACHE_END)) && + completion_code != BASE_JD_EVENT_DONE && + !(completion_code & BASE_JD_SW_EVENT)) { + /* When a job chain fails, on a T60x or when + * BASE_JD_REQ_SKIP_CACHE_END is set, the GPU cache is not + * flushed. To prevent future evictions causing possible memory + * corruption we need to flush the cache manually before any + * affected memory gets reused. 
*/ + katom->need_cache_flush_cores_retained = katom->affinity; + kbase_pm_request_cores(kbdev, false, katom->affinity); + } else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) { + if (kbdev->gpu_props.num_core_groups > 1 && + !(katom->affinity & + kbdev->gpu_props.props.coherency_info.group[0].core_mask + ) && + (katom->affinity & + kbdev->gpu_props.props.coherency_info.group[1].core_mask + )) { + dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n"); + katom->need_cache_flush_cores_retained = + katom->affinity; + kbase_pm_request_cores(kbdev, false, + katom->affinity); + } + } + + katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp); + kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0); + + if (completion_code == BASE_JD_EVENT_STOPPED) { + struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js, + 0); + + /* + * Dequeue next atom from ringbuffers on same slot if required. + * This atom will already have been removed from the NEXT + * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that + * the atoms on this slot are returned in the correct order. + */ + if (next_katom && katom->kctx == next_katom->kctx && + next_katom->sched_priority == + katom->sched_priority) { + kbase_gpu_dequeue_atom(kbdev, js, end_timestamp); + kbase_jm_return_atom_to_js(kbdev, next_katom); + } + } else if (completion_code != BASE_JD_EVENT_DONE) { + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + int i; + +#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0 + KBASE_TRACE_DUMP(kbdev); +#endif + kbasep_js_clear_submit_allowed(js_devdata, katom->kctx); + + /* + * Remove all atoms on the same context from ringbuffers. This + * will not remove atoms that are already on the GPU, as these + * are guaranteed not to have fail dependencies on the failed + * atom. 
+ */ + for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) { + struct kbase_jd_atom *katom_idx0 = + kbase_gpu_inspect(kbdev, i, 0); + struct kbase_jd_atom *katom_idx1 = + kbase_gpu_inspect(kbdev, i, 1); + + if (katom_idx0 && katom_idx0->kctx == katom->kctx && + HAS_DEP(katom_idx0) && + katom_idx0->gpu_rb_state != + KBASE_ATOM_GPU_RB_SUBMITTED) { + /* Dequeue katom_idx0 from ringbuffer */ + kbase_gpu_dequeue_atom(kbdev, i, end_timestamp); + + if (katom_idx1 && + katom_idx1->kctx == katom->kctx + && HAS_DEP(katom_idx1) && + katom_idx0->gpu_rb_state != + KBASE_ATOM_GPU_RB_SUBMITTED) { + /* Dequeue katom_idx1 from ringbuffer */ + kbase_gpu_dequeue_atom(kbdev, i, + end_timestamp); + + katom_idx1->event_code = + BASE_JD_EVENT_STOPPED; + kbase_jm_return_atom_to_js(kbdev, + katom_idx1); + } + katom_idx0->event_code = BASE_JD_EVENT_STOPPED; + kbase_jm_return_atom_to_js(kbdev, katom_idx0); + + } else if (katom_idx1 && + katom_idx1->kctx == katom->kctx && + HAS_DEP(katom_idx1) && + katom_idx1->gpu_rb_state != + KBASE_ATOM_GPU_RB_SUBMITTED) { + /* Can not dequeue this atom yet - will be + * dequeued when atom at idx0 completes */ + katom_idx1->event_code = BASE_JD_EVENT_STOPPED; + kbase_gpu_mark_atom_for_return(kbdev, + katom_idx1); + } + } + } + + KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc, + js, completion_code); + + if (job_tail != 0 && job_tail != katom->jc) { + bool was_updated = (job_tail != katom->jc); + + /* Some of the job has been executed, so we update the job chain + * address to where we should resume from */ + katom->jc = job_tail; + if (was_updated) + KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx, + katom, job_tail, js); + } + + /* Only update the event code for jobs that weren't cancelled */ + if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED) + katom->event_code = (base_jd_event_code)completion_code; + + kbase_device_trace_register_access(kctx, REG_WRITE, + JOB_CONTROL_REG(JOB_IRQ_CLEAR), + 1 << js); + + /* Complete the job, and start new ones + * + * Also defer remaining work onto the workqueue: + * - Re-queue Soft-stopped jobs + * - For any other jobs, queue the job back into the dependency system + * - Schedule out the parent context if necessary, and schedule a new + * one in. + */ +#ifdef CONFIG_GPU_TRACEPOINTS + { + /* The atom in the HEAD */ + struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js, + 0); + + if (next_katom && next_katom->gpu_rb_state == + KBASE_ATOM_GPU_RB_SUBMITTED) { + char js_string[16]; + + trace_gpu_sched_switch(kbasep_make_job_slot_string(js, + js_string, + sizeof(js_string)), + ktime_to_ns(*end_timestamp), + (u32)next_katom->kctx->id, 0, + next_katom->work_id); + kbdev->hwaccess.backend.slot_rb[js].last_context = + next_katom->kctx; + } else { + char js_string[16]; + + trace_gpu_sched_switch(kbasep_make_job_slot_string(js, + js_string, + sizeof(js_string)), + ktime_to_ns(ktime_get()), 0, 0, + 0); + kbdev->hwaccess.backend.slot_rb[js].last_context = 0; + } + } +#endif + + if (kbdev->serialize_jobs & KBASE_SERIALIZE_RESET) + kbase_reset_gpu_silent(kbdev); + + if (completion_code == BASE_JD_EVENT_STOPPED) + katom = kbase_jm_return_atom_to_js(kbdev, katom); + else + katom = kbase_jm_complete(kbdev, katom, end_timestamp); + + if (katom) { + /* Cross-slot dependency has now become runnable. Try to submit + * it. 
*/ + + /* Check if there are lower priority jobs to soft stop */ + kbase_job_slot_ctx_priority_check_locked(kctx, katom); + + kbase_jm_try_kick(kbdev, 1 << katom->slot_nr); + } + + /* Job completion may have unblocked other atoms. Try to update all job + * slots */ + kbase_backend_slot_update(kbdev); +} + +void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp) +{ + int js; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* Reset should always take the GPU out of protected mode */ + WARN_ON(kbase_gpu_in_protected_mode(kbdev)); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + int atom_idx = 0; + int idx; + + for (idx = 0; idx < SLOT_RB_SIZE; idx++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, + js, atom_idx); + bool keep_in_jm_rb = false; + + if (!katom) + break; + if (katom->protected_state.exit == + KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) + { + KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev); + + kbase_vinstr_resume(kbdev->vinstr_ctx); + + /* protected mode sanity checks */ + KBASE_DEBUG_ASSERT_MSG( + kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev), + "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)", + kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev)); + KBASE_DEBUG_ASSERT_MSG( + (kbase_jd_katom_is_protected(katom) && js == 0) || + !kbase_jd_katom_is_protected(katom), + "Protected atom on JS%d not supported", js); + } + if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED) + keep_in_jm_rb = true; + + kbase_gpu_release_atom(kbdev, katom, NULL); + + /* + * If the atom wasn't on HW when the reset was issued + * then leave it in the RB and next time we're kicked + * it will be processed again from the starting state. + */ + if (keep_in_jm_rb) { + kbasep_js_job_check_deref_cores(kbdev, katom); + katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; + katom->affinity = 0; + katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK; + /* As the atom was not removed, increment the + * index so that we read the correct atom in the + * next iteration. */ + atom_idx++; + continue; + } + + /* + * The atom was on the HW when the reset was issued + * all we can do is fail the atom. 
+ */ + kbase_gpu_dequeue_atom(kbdev, js, NULL); + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + kbase_jm_complete(kbdev, katom, end_timestamp); + } + } + + kbdev->protected_mode_transition = false; +} + +static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev, + int js, + struct kbase_jd_atom *katom, + u32 action) +{ + u32 hw_action = action & JS_COMMAND_MASK; + + kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom); + kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action, + katom->core_req, katom); + katom->kctx->blocked_js[js][katom->sched_priority] = true; +} + +static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + u32 action, + bool disjoint) +{ + katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT; + kbase_gpu_mark_atom_for_return(kbdev, katom); + katom->kctx->blocked_js[katom->slot_nr][katom->sched_priority] = true; + + if (disjoint) + kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, + katom); +} + +static int should_stop_x_dep_slot(struct kbase_jd_atom *katom) +{ + if (katom->x_post_dep) { + struct kbase_jd_atom *dep_atom = katom->x_post_dep; + + if (dep_atom->gpu_rb_state != + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB && + dep_atom->gpu_rb_state != + KBASE_ATOM_GPU_RB_RETURN_TO_JS) + return dep_atom->slot_nr; + } + return -1; +} + +static void kbase_job_evicted(struct kbase_jd_atom *katom) +{ + kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom, + katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT); +} + +bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js, + struct kbase_jd_atom *katom, + u32 action) +{ + struct kbase_jd_atom *katom_idx0; + struct kbase_jd_atom *katom_idx1; + + bool katom_idx0_valid, katom_idx1_valid; + + bool ret = false; + + int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1; + int prio_idx0 = 0, prio_idx1 = 0; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + katom_idx0 = kbase_gpu_inspect(kbdev, js, 0); + katom_idx1 = kbase_gpu_inspect(kbdev, js, 1); + + if (katom_idx0) + prio_idx0 = katom_idx0->sched_priority; + if (katom_idx1) + prio_idx1 = katom_idx1->sched_priority; + + if (katom) { + katom_idx0_valid = (katom_idx0 == katom); + /* If idx0 is to be removed and idx1 is on the same context, + * then idx1 must also be removed otherwise the atoms might be + * returned out of order */ + if (katom_idx1) + katom_idx1_valid = (katom_idx1 == katom) || + (katom_idx0_valid && + (katom_idx0->kctx == + katom_idx1->kctx)); + else + katom_idx1_valid = false; + } else { + katom_idx0_valid = (katom_idx0 && + (!kctx || katom_idx0->kctx == kctx)); + katom_idx1_valid = (katom_idx1 && + (!kctx || katom_idx1->kctx == kctx) && + prio_idx0 == prio_idx1); + } + + if (katom_idx0_valid) + stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0); + if (katom_idx1_valid) + stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1); + + if (katom_idx0_valid) { + if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) { + /* Simple case - just dequeue and return */ + kbase_gpu_dequeue_atom(kbdev, js, NULL); + if (katom_idx1_valid) { + kbase_gpu_dequeue_atom(kbdev, js, NULL); + katom_idx1->event_code = + BASE_JD_EVENT_REMOVED_FROM_NEXT; + kbase_jm_return_atom_to_js(kbdev, katom_idx1); + katom_idx1->kctx->blocked_js[js][prio_idx1] = + true; + } + + katom_idx0->event_code = + BASE_JD_EVENT_REMOVED_FROM_NEXT; + kbase_jm_return_atom_to_js(kbdev, katom_idx0); + katom_idx0->kctx->blocked_js[js][prio_idx0] = true; + } else { + /* 
katom_idx0 is on GPU */ + if (katom_idx1 && katom_idx1->gpu_rb_state == + KBASE_ATOM_GPU_RB_SUBMITTED) { + /* katom_idx0 and katom_idx1 are on GPU */ + + if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, + JS_COMMAND_NEXT), NULL) == 0) { + /* idx0 has already completed - stop + * idx1 if needed*/ + if (katom_idx1_valid) { + kbase_gpu_stop_atom(kbdev, js, + katom_idx1, + action); + ret = true; + } + } else { + /* idx1 is in NEXT registers - attempt + * to remove */ + kbase_reg_write(kbdev, + JOB_SLOT_REG(js, + JS_COMMAND_NEXT), + JS_COMMAND_NOP, NULL); + + if (kbase_reg_read(kbdev, + JOB_SLOT_REG(js, + JS_HEAD_NEXT_LO), NULL) + != 0 || + kbase_reg_read(kbdev, + JOB_SLOT_REG(js, + JS_HEAD_NEXT_HI), NULL) + != 0) { + /* idx1 removed successfully, + * will be handled in IRQ */ + kbase_job_evicted(katom_idx1); + kbase_gpu_remove_atom(kbdev, + katom_idx1, + action, true); + stop_x_dep_idx1 = + should_stop_x_dep_slot(katom_idx1); + + /* stop idx0 if still on GPU */ + kbase_gpu_stop_atom(kbdev, js, + katom_idx0, + action); + ret = true; + } else if (katom_idx1_valid) { + /* idx0 has already completed, + * stop idx1 if needed */ + kbase_gpu_stop_atom(kbdev, js, + katom_idx1, + action); + ret = true; + } + } + } else if (katom_idx1_valid) { + /* idx1 not on GPU but must be dequeued*/ + + /* idx1 will be handled in IRQ */ + kbase_gpu_remove_atom(kbdev, katom_idx1, action, + false); + /* stop idx0 */ + /* This will be repeated for anything removed + * from the next registers, since their normal + * flow was also interrupted, and this function + * might not enter disjoint state e.g. if we + * don't actually do a hard stop on the head + * atom */ + kbase_gpu_stop_atom(kbdev, js, katom_idx0, + action); + ret = true; + } else { + /* no atom in idx1 */ + /* just stop idx0 */ + kbase_gpu_stop_atom(kbdev, js, katom_idx0, + action); + ret = true; + } + } + } else if (katom_idx1_valid) { + if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) { + /* Mark for return */ + /* idx1 will be returned once idx0 completes */ + kbase_gpu_remove_atom(kbdev, katom_idx1, action, + false); + } else { + /* idx1 is on GPU */ + if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, + JS_COMMAND_NEXT), NULL) == 0) { + /* idx0 has already completed - stop idx1 */ + kbase_gpu_stop_atom(kbdev, js, katom_idx1, + action); + ret = true; + } else { + /* idx1 is in NEXT registers - attempt to + * remove */ + kbase_reg_write(kbdev, JOB_SLOT_REG(js, + JS_COMMAND_NEXT), + JS_COMMAND_NOP, NULL); + + if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, + JS_HEAD_NEXT_LO), NULL) != 0 || + kbase_reg_read(kbdev, JOB_SLOT_REG(js, + JS_HEAD_NEXT_HI), NULL) != 0) { + /* idx1 removed successfully, will be + * handled in IRQ once idx0 completes */ + kbase_job_evicted(katom_idx1); + kbase_gpu_remove_atom(kbdev, katom_idx1, + action, + false); + } else { + /* idx0 has already completed - stop + * idx1 */ + kbase_gpu_stop_atom(kbdev, js, + katom_idx1, + action); + ret = true; + } + } + } + } + + + if (stop_x_dep_idx0 != -1) + kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0, + NULL, action); + + if (stop_x_dep_idx1 != -1) + kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1, + NULL, action); + + return ret; +} + +void kbase_gpu_cacheclean(struct kbase_device *kbdev) +{ + /* Limit the number of loops to avoid a hang if the interrupt is missed + */ + u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS; + + mutex_lock(&kbdev->cacheclean_lock); + + /* use GPU_COMMAND completion solution */ + /* clean & invalidate the caches */ + KBASE_TRACE_ADD(kbdev, 
CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CLEAN_INV_CACHES, NULL); + + /* wait for cache flush to complete before continuing */ + while (--max_loops && + (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & + CLEAN_CACHES_COMPLETED) == 0) + ; + + /* clear the CLEAN_CACHES_COMPLETED irq */ + KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, + CLEAN_CACHES_COMPLETED); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), + CLEAN_CACHES_COMPLETED, NULL); + KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state != + KBASE_INSTR_STATE_CLEANING, + "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang."); + + mutex_unlock(&kbdev->cacheclean_lock); +} + +void kbase_backend_cacheclean(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + if (katom->need_cache_flush_cores_retained) { + unsigned long flags; + + kbase_gpu_cacheclean(kbdev); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_pm_unrequest_cores(kbdev, false, + katom->need_cache_flush_cores_retained); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + katom->need_cache_flush_cores_retained = 0; + } +} + +void kbase_backend_complete_wq(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + /* + * If cache flush required due to HW workaround then perform the flush + * now + */ + kbase_backend_cacheclean(kbdev, katom); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) && + (katom->core_req & BASE_JD_REQ_FS) && + katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT && + (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) && + !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) { + dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n"); + if (kbasep_10969_workaround_clamp_coordinates(katom)) { + /* The job had a TILE_RANGE_FAULT after was soft-stopped + * Due to an HW issue we try to execute the job again. + */ + dev_dbg(kbdev->dev, + "Clamping has been executed, try to rerun the job\n" + ); + katom->event_code = BASE_JD_EVENT_STOPPED; + katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN; + } + } + + /* Clear the coreref_state now - while check_deref_cores() may not have + * been called yet, the caller will have taken a copy of this field. If + * this is not done, then if the atom is re-scheduled (following a soft + * stop) then the core reference would not be retaken. 
*/ + katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; + katom->affinity = 0; +} + +void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev, + base_jd_core_req core_req, u64 affinity, + enum kbase_atom_coreref_state coreref_state) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity, + coreref_state); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + if (!kbdev->pm.active_count) { + mutex_lock(&kbdev->js_data.runpool_mutex); + mutex_lock(&kbdev->pm.lock); + kbase_pm_update_active(kbdev); + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&kbdev->js_data.runpool_mutex); + } +} + +void kbase_gpu_dump_slots(struct kbase_device *kbdev) +{ + unsigned long flags; + int js; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n"); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + int idx; + + for (idx = 0; idx < SLOT_RB_SIZE; idx++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, + js, + idx); + + if (katom) + dev_info(kbdev->dev, + " js%d idx%d : katom=%p gpu_rb_state=%d\n", + js, idx, katom, katom->gpu_rb_state); + else + dev_info(kbdev->dev, " js%d idx%d : empty\n", + js, idx); + } + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + + + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h new file mode 100644 index 00000000000000..1e0e05ad3ea450 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h @@ -0,0 +1,76 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Register-based HW access backend specific APIs + */ + +#ifndef _KBASE_HWACCESS_GPU_H_ +#define _KBASE_HWACCESS_GPU_H_ + +#include + +/** + * kbase_gpu_irq_evict - Evict an atom from a NEXT slot + * + * @kbdev: Device pointer + * @js: Job slot to evict from + * + * Evict the atom in the NEXT slot for the specified job slot. This function is + * called from the job complete IRQ handler when the previous job has failed. + * + * Return: true if job evicted from NEXT registers, false otherwise + */ +bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js); + +/** + * kbase_gpu_complete_hw - Complete an atom on job slot js + * + * @kbdev: Device pointer + * @js: Job slot that has completed + * @completion_code: Event code from job that has completed + * @job_tail: The tail address from the hardware if the job has partially + * completed + * @end_timestamp: Time of completion + */ +void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js, + u32 completion_code, + u64 job_tail, + ktime_t *end_timestamp); + +/** + * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer + * + * @kbdev: Device pointer + * @js: Job slot to inspect + * @idx: Index into ringbuffer. 0 is the job currently running on + * the slot, 1 is the job waiting, all other values are invalid. 
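The idx convention used by this API comes from a small per-slot ringbuffer whose read and write indices are free-running 8-bit counters, masked only when the entry array is touched. A stand-alone sketch of that indexing scheme; the two-entry size and all names here are assumptions for illustration, not the driver's definitions:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RB_SIZE 2			/* assumed two-entry slot ringbuffer */
	#define RB_MASK (RB_SIZE - 1)

	struct rb {
		int entries[RB_SIZE];
		uint8_t write_idx;		/* free-running, wraps at 256 */
		uint8_t read_idx;
	};

	static int rb_entries(const struct rb *rb)
	{
		/* Occupancy is the index difference, valid across wrap-around */
		return (int)(int8_t)(rb->write_idx - rb->read_idx);
	}

	static int *rb_inspect(struct rb *rb, int idx)
	{
		if (idx >= rb_entries(rb))
			return NULL;		/* idx out of range */
		return &rb->entries[(rb->read_idx + idx) & RB_MASK];
	}

	int main(void)
	{
		struct rb rb = { { 0 }, 0, 0 };

		rb.entries[rb.write_idx++ & RB_MASK] = 100;	/* "atom" at idx 0 */
		rb.entries[rb.write_idx++ & RB_MASK] = 200;	/* "atom" at idx 1 */

		assert(*rb_inspect(&rb, 0) == 100);	/* currently running */
		assert(*rb_inspect(&rb, 1) == 200);	/* waiting */
		assert(rb_inspect(&rb, 2) == NULL);	/* invalid index */

		rb.read_idx++;				/* head completes */
		printf("entries left: %d\n", rb_entries(&rb));
		return 0;
	}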
+ * Return: The atom at that position in the ringbuffer + * or NULL if no atom present + */ +struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js, + int idx); + +/** + * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers + * + * @kbdev: Device pointer + */ +void kbase_gpu_dump_slots(struct kbase_device *kbdev); + +#endif /* _KBASE_HWACCESS_GPU_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c new file mode 100644 index 00000000000000..54d8ddd8009760 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c @@ -0,0 +1,303 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base kernel affinity manager APIs + */ + +#include +#include "mali_kbase_js_affinity.h" +#include "mali_kbase_hw.h" + +#include + + +bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, + int js) +{ + /* + * Here are the reasons for using job slot 2: + * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose) + * - In absence of the above, then: + * - Atoms with BASE_JD_REQ_COHERENT_GROUP + * - But, only when there aren't contexts with + * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on + * all cores on slot 1 could be blocked by those using a coherent group + * on slot 2 + * - And, only when you actually have 2 or more coregroups - if you + * only have 1 coregroup, then having jobs for slot 2 implies they'd + * also be for slot 1, meaning you'll get interference from them. Jobs + * able to run on slot 2 could also block jobs that can only run on + * slot 1 (tiler jobs) + */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) + return true; + + if (js != 2) + return true; + + /* Only deal with js==2 now: */ + if (kbdev->gpu_props.num_core_groups > 1) { + /* Only use slot 2 in the 2+ coregroup case */ + if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, + KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) == + false) { + /* ...But only when we *don't* have atoms that run on + * all cores */ + + /* No specific check for BASE_JD_REQ_COHERENT_GROUP + * atoms - the policy will sort that out */ + return true; + } + } + + /* Above checks failed mean we shouldn't use slot 2 */ + return false; +} + +/* + * As long as it has been decided to have a deeper modification of + * what job scheduler, power manager and affinity manager will + * implement, this function is just an intermediate step that + * assumes: + * - all working cores will be powered on when this is called. + * - largest current configuration is 2 core groups. + * - It has been decided not to have hardcoded values so the low + * and high cores in a core split will be evently distributed. + * - Odd combinations of core requirements have been filtered out + * and do not get to this function (e.g. CS+T+NSS is not + * supported here). 
+ * - This function is frequently called and can be optimized, + * (see notes in loops), but as the functionallity will likely + * be modified, optimization has not been addressed. +*/ +bool kbase_js_choose_affinity(u64 * const affinity, + struct kbase_device *kbdev, + struct kbase_jd_atom *katom, int js) +{ + base_jd_core_req core_req = katom->core_req; + unsigned int num_core_groups = kbdev->gpu_props.num_core_groups; + u64 core_availability_mask; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + core_availability_mask = kbase_pm_ca_get_core_mask(kbdev); + + /* + * If no cores are currently available (core availability policy is + * transitioning) then fail. + */ + if (0 == core_availability_mask) { + *affinity = 0; + return false; + } + + KBASE_DEBUG_ASSERT(js >= 0); + + if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == + BASE_JD_REQ_T) { + /* If the hardware supports XAFFINITY then we'll only enable + * the tiler (which is the default so this is a no-op), + * otherwise enable shader core 0. */ + if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) + *affinity = 1; + else + *affinity = 0; + + return true; + } + + if (1 == kbdev->gpu_props.num_cores) { + /* trivial case only one core, nothing to do */ + *affinity = core_availability_mask & + kbdev->pm.debug_core_mask[js]; + } else { + if ((core_req & (BASE_JD_REQ_COHERENT_GROUP | + BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) { + if (js == 0 || num_core_groups == 1) { + /* js[0] and single-core-group systems just get + * the first core group */ + *affinity = + kbdev->gpu_props.props.coherency_info.group[0].core_mask + & core_availability_mask & + kbdev->pm.debug_core_mask[js]; + } else { + /* js[1], js[2] use core groups 0, 1 for + * dual-core-group systems */ + u32 core_group_idx = ((u32) js) - 1; + + KBASE_DEBUG_ASSERT(core_group_idx < + num_core_groups); + *affinity = + kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask + & core_availability_mask & + kbdev->pm.debug_core_mask[js]; + + /* If the job is specifically targeting core + * group 1 and the core availability policy is + * keeping that core group off, then fail */ + if (*affinity == 0 && core_group_idx == 1 && + kbdev->pm.backend.cg1_disabled + == true) + katom->event_code = + BASE_JD_EVENT_PM_EVENT; + } + } else { + /* All cores are available when no core split is + * required */ + *affinity = core_availability_mask & + kbdev->pm.debug_core_mask[js]; + } + } + + /* + * If no cores are currently available in the desired core group(s) + * (core availability policy is transitioning) then fail. + */ + if (*affinity == 0) + return false; + + /* Enable core 0 if tiler required for hardware without XAFFINITY + * support (notes above) */ + if (core_req & BASE_JD_REQ_T) { + if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) + *affinity = *affinity | 1; + } + + return true; +} + +static inline bool kbase_js_affinity_is_violating( + struct kbase_device *kbdev, + u64 *affinities) +{ + /* This implementation checks whether the two slots involved in Generic + * thread creation have intersecting affinity. This is due to micro- + * architectural issues where a job in slot A targetting cores used by + * slot B could prevent the job in slot B from making progress until the + * job in slot A has completed. 
+ */ + u64 affinity_set_left; + u64 affinity_set_right; + u64 intersection; + + KBASE_DEBUG_ASSERT(affinities != NULL); + + affinity_set_left = affinities[1]; + + affinity_set_right = affinities[2]; + + /* A violation occurs when any bit in the left_set is also in the + * right_set */ + intersection = affinity_set_left & affinity_set_right; + + return (bool) (intersection != (u64) 0u); +} + +bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, + u64 affinity) +{ + struct kbasep_js_device_data *js_devdata; + u64 new_affinities[BASE_JM_MAX_NR_SLOTS]; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); + js_devdata = &kbdev->js_data; + + memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities, + sizeof(js_devdata->runpool_irq.slot_affinities)); + + new_affinities[js] |= affinity; + + return kbase_js_affinity_is_violating(kbdev, new_affinities); +} + +void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, + u64 affinity) +{ + struct kbasep_js_device_data *js_devdata; + u64 cores; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); + js_devdata = &kbdev->js_data; + + KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity) + == false); + + cores = affinity; + while (cores) { + int bitnum = fls64(cores) - 1; + u64 bit = 1ULL << bitnum; + s8 cnt; + + cnt = + ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]); + + if (cnt == 1) + js_devdata->runpool_irq.slot_affinities[js] |= bit; + + cores &= ~bit; + } +} + +void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, + u64 affinity) +{ + struct kbasep_js_device_data *js_devdata; + u64 cores; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS); + js_devdata = &kbdev->js_data; + + cores = affinity; + while (cores) { + int bitnum = fls64(cores) - 1; + u64 bit = 1ULL << bitnum; + s8 cnt; + + KBASE_DEBUG_ASSERT( + js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0); + + cnt = + --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]); + + if (0 == cnt) + js_devdata->runpool_irq.slot_affinities[js] &= ~bit; + + cores &= ~bit; + } +} + +#if KBASE_TRACE_ENABLE +void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata; + int slot_nr; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + js_devdata = &kbdev->js_data; + + for (slot_nr = 0; slot_nr < 3; ++slot_nr) + KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL, + NULL, 0u, slot_nr, + (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]); +} +#endif /* KBASE_TRACE_ENABLE */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h new file mode 100644 index 00000000000000..35d9781ae092c6 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h @@ -0,0 +1,129 @@ +/* + * + * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Affinity Manager internal APIs. 
+ */ + +#ifndef _KBASE_JS_AFFINITY_H_ +#define _KBASE_JS_AFFINITY_H_ + +/** + * kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to + * submit a job to a particular job slot in the current status + * + * @kbdev: The kbase device structure of the device + * @js: Job slot number to check for allowance + * + * Will check if submitting to the given job slot is allowed in the current + * status. For example using job slot 2 while in soft-stoppable state and only + * having 1 coregroup is not allowed by the policy. This function should be + * called prior to submitting a job to a slot to make sure policy rules are not + * violated. + * + * The following locking conditions are made on the caller + * - it must hold hwaccess_lock + */ +bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js); + +/** + * kbase_js_choose_affinity - Compute affinity for a given job. + * + * @affinity: Affinity bitmap computed + * @kbdev: The kbase device structure of the device + * @katom: Job chain of which affinity is going to be found + * @js: Slot the job chain is being submitted + * + * Currently assumes an all-on/all-off power management policy. + * Also assumes there is at least one core with tiler available. + * + * Returns true if a valid affinity was chosen, false if + * no cores were available. + */ +bool kbase_js_choose_affinity(u64 * const affinity, + struct kbase_device *kbdev, + struct kbase_jd_atom *katom, + int js); + +/** + * kbase_js_affinity_would_violate - Determine whether a proposed affinity on + * job slot @js would cause a violation of affinity restrictions. + * + * @kbdev: Kbase device structure + * @js: The job slot to test + * @affinity: The affinity mask to test + * + * The following locks must be held by the caller + * - hwaccess_lock + * + * Return: true if the affinity would violate the restrictions + */ +bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, + u64 affinity); + +/** + * kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by + * a slot + * + * @kbdev: Kbase device structure + * @js: The job slot retaining the cores + * @affinity: The cores to retain + * + * The following locks must be held by the caller + * - hwaccess_lock + */ +void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, + u64 affinity); + +/** + * kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used + * by a slot + * + * @kbdev: Kbase device structure + * @js: Job slot + * @affinity: Bit mask of core to be released + * + * Cores must be released as soon as a job is dequeued from a slot's 'submit + * slots', and before another job is submitted to those slots. Otherwise, the + * refcount could exceed the maximum number submittable to a slot, + * %BASE_JM_SUBMIT_SLOTS. 
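/*
 * A minimal sketch (not part of the patch) of the calling sequence the
 * kernel-doc above describes for the affinity tracking API: compute an
 * affinity for the atom, check that it would not violate the per-slot
 * restrictions, retain the cores while the job occupies the submit slot,
 * and release them again when the job is dequeued. The helper name is
 * hypothetical and error handling is reduced; the caller is assumed to hold
 * hwaccess_lock, as these functions require.
 */
static bool example_submit_with_affinity(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom, int js)
{
	u64 affinity;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kbase_js_choose_affinity(&affinity, kbdev, katom, js))
		return false;	/* no cores currently available */

	if (kbase_js_affinity_would_violate(kbdev, js, affinity))
		return false;	/* would intersect another slot's cores */

	kbase_js_affinity_retain_slot_cores(kbdev, js, affinity);

	/* ... write the job to the slot's submit registers here ... */

	/* Later, as soon as the job is dequeued from the submit slot: */
	kbase_js_affinity_release_slot_cores(kbdev, js, affinity);

	return true;
}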
+ * + * The following locks must be held by the caller + * - hwaccess_lock + */ +void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, + u64 affinity); + +/** + * kbase_js_debug_log_current_affinities - log the current affinities + * + * @kbdev: Kbase device structure + * + * Output to the Trace log the current tracked affinities on all slots + */ +#if KBASE_TRACE_ENABLE +void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev); +#else /* KBASE_TRACE_ENABLE */ +static inline void +kbase_js_debug_log_current_affinities(struct kbase_device *kbdev) +{ +} +#endif /* KBASE_TRACE_ENABLE */ + +#endif /* _KBASE_JS_AFFINITY_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c new file mode 100644 index 00000000000000..63d048f7a87960 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c @@ -0,0 +1,348 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Register-based HW access backend specific job scheduler APIs + */ + +#include +#include +#include +#include + +/* + * Hold the runpool_mutex for this + */ +static inline bool timer_callback_should_run(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + s8 nr_running_ctxs; + + lockdep_assert_held(&kbdev->js_data.runpool_mutex); + + /* Timer must stop if we are suspending */ + if (backend->suspend_timer) + return false; + + /* nr_contexts_pullable is updated with the runpool_mutex. However, the + * locking in the caller gives us a barrier that ensures + * nr_contexts_pullable is up-to-date for reading */ + nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable); + +#ifdef CONFIG_MALI_DEBUG + if (kbdev->js_data.softstop_always) { + /* Debug support for allowing soft-stop on a single context */ + return true; + } +#endif /* CONFIG_MALI_DEBUG */ + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) { + /* Timeouts would have to be 4x longer (due to micro- + * architectural design) to support OpenCL conformance tests, so + * only run the timer when there's: + * - 2 or more CL contexts + * - 1 or more GLES contexts + * + * NOTE: We will treat a context that has both Compute and Non- + * Compute jobs will be treated as an OpenCL context (hence, we + * don't check KBASEP_JS_CTX_ATTR_NON_COMPUTE). 
+ */ + { + s8 nr_compute_ctxs = + kbasep_js_ctx_attr_count_on_runpool(kbdev, + KBASEP_JS_CTX_ATTR_COMPUTE); + s8 nr_noncompute_ctxs = nr_running_ctxs - + nr_compute_ctxs; + + return (bool) (nr_compute_ctxs >= 2 || + nr_noncompute_ctxs > 0); + } + } else { + /* Run the timer callback whenever you have at least 1 context + */ + return (bool) (nr_running_ctxs > 0); + } +} + +static enum hrtimer_restart timer_callback(struct hrtimer *timer) +{ + unsigned long flags; + struct kbase_device *kbdev; + struct kbasep_js_device_data *js_devdata; + struct kbase_backend_data *backend; + int s; + bool reset_needed = false; + + KBASE_DEBUG_ASSERT(timer != NULL); + + backend = container_of(timer, struct kbase_backend_data, + scheduling_timer); + kbdev = container_of(backend, struct kbase_device, hwaccess.backend); + js_devdata = &kbdev->js_data; + + /* Loop through the slots */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) { + struct kbase_jd_atom *atom = NULL; + + if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) { + atom = kbase_gpu_inspect(kbdev, s, 0); + KBASE_DEBUG_ASSERT(atom != NULL); + } + + if (atom != NULL) { + /* The current version of the model doesn't support + * Soft-Stop */ + if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) { + u32 ticks = atom->ticks++; + +#ifndef CONFIG_MALI_JOB_DUMP + u32 soft_stop_ticks, hard_stop_ticks, + gpu_reset_ticks; + if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { + soft_stop_ticks = + js_devdata->soft_stop_ticks_cl; + hard_stop_ticks = + js_devdata->hard_stop_ticks_cl; + gpu_reset_ticks = + js_devdata->gpu_reset_ticks_cl; + } else { + soft_stop_ticks = + js_devdata->soft_stop_ticks; + hard_stop_ticks = + js_devdata->hard_stop_ticks_ss; + gpu_reset_ticks = + js_devdata->gpu_reset_ticks_ss; + } + + /* If timeouts have been changed then ensure + * that atom tick count is not greater than the + * new soft_stop timeout. This ensures that + * atoms do not miss any of the timeouts due to + * races between this worker and the thread + * changing the timeouts. */ + if (backend->timeouts_updated && + ticks > soft_stop_ticks) + ticks = atom->ticks = soft_stop_ticks; + + /* Job is Soft-Stoppable */ + if (ticks == soft_stop_ticks) { + int disjoint_threshold = + KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD; + u32 softstop_flags = 0u; + /* Job has been scheduled for at least + * js_devdata->soft_stop_ticks ticks. + * Soft stop the slot so we can run + * other jobs. + */ + dev_dbg(kbdev->dev, "Soft-stop"); +#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS + /* nr_user_contexts_running is updated + * with the runpool_mutex, but we can't + * take that here. + * + * However, if it's about to be + * increased then the new context can't + * run any jobs until they take the + * hwaccess_lock, so it's OK to observe + * the older value. + * + * Similarly, if it's about to be + * decreased, the last job from another + * context has already finished, so it's + * not too bad that we observe the older + * value and register a disjoint event + * when we try soft-stopping */ + if (js_devdata->nr_user_contexts_running + >= disjoint_threshold) + softstop_flags |= + JS_COMMAND_SW_CAUSES_DISJOINT; + + kbase_job_slot_softstop_swflags(kbdev, + s, atom, softstop_flags); +#endif + } else if (ticks == hard_stop_ticks) { + /* Job has been scheduled for at least + * js_devdata->hard_stop_ticks_ss ticks. + * It should have been soft-stopped by + * now. Hard stop the slot. 
+ */ +#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS + int ms = + js_devdata->scheduling_period_ns + / 1000000u; + dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", + (unsigned long)ticks, + (unsigned long)ms); + kbase_job_slot_hardstop(atom->kctx, s, + atom); +#endif + } else if (ticks == gpu_reset_ticks) { + /* Job has been scheduled for at least + * js_devdata->gpu_reset_ticks_ss ticks. + * It should have left the GPU by now. + * Signal that the GPU needs to be + * reset. + */ + reset_needed = true; + } +#else /* !CONFIG_MALI_JOB_DUMP */ + /* NOTE: During CONFIG_MALI_JOB_DUMP, we use + * the alternate timeouts, which makes the hard- + * stop and GPU reset timeout much longer. We + * also ensure that we don't soft-stop at all. + */ + if (ticks == js_devdata->soft_stop_ticks) { + /* Job has been scheduled for at least + * js_devdata->soft_stop_ticks. We do + * not soft-stop during + * CONFIG_MALI_JOB_DUMP, however. + */ + dev_dbg(kbdev->dev, "Soft-stop"); + } else if (ticks == + js_devdata->hard_stop_ticks_dumping) { + /* Job has been scheduled for at least + * js_devdata->hard_stop_ticks_dumping + * ticks. Hard stop the slot. + */ +#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS + int ms = + js_devdata->scheduling_period_ns + / 1000000u; + dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", + (unsigned long)ticks, + (unsigned long)ms); + kbase_job_slot_hardstop(atom->kctx, s, + atom); +#endif + } else if (ticks == + js_devdata->gpu_reset_ticks_dumping) { + /* Job has been scheduled for at least + * js_devdata->gpu_reset_ticks_dumping + * ticks. It should have left the GPU by + * now. Signal that the GPU needs to be + * reset. + */ + reset_needed = true; + } +#endif /* !CONFIG_MALI_JOB_DUMP */ + } + } + } +#if KBASE_GPU_RESET_EN + if (reset_needed) { + dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issueing GPU soft-reset to resolve."); + + if (kbase_prepare_to_reset_gpu_locked(kbdev)) + kbase_reset_gpu_locked(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + /* the timer is re-issued if there is contexts in the run-pool */ + + if (backend->timer_running) + hrtimer_start(&backend->scheduling_timer, + HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns), + HRTIMER_MODE_REL); + + backend->timeouts_updated = false; + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return HRTIMER_NORESTART; +} + +void kbase_backend_ctx_count_changed(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + unsigned long flags; + + lockdep_assert_held(&js_devdata->runpool_mutex); + + if (!timer_callback_should_run(kbdev)) { + /* Take spinlock to force synchronisation with timer */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + backend->timer_running = false; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + /* From now on, return value of timer_callback_should_run() will + * also cause the timer to not requeue itself. 
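/*
 * A distilled sketch (not part of the patch) of the escalation ladder that
 * timer_callback() above implements for a running atom: nothing happens
 * until the soft-stop threshold is reached, then the atom is soft-stopped,
 * then hard-stopped, and finally a GPU reset is requested. The enum and
 * helper are hypothetical; the real code reads the thresholds from
 * kbdev->js_data, picks the _cl or _ss variants per atom type, and uses the
 * alternate _dumping timeouts under CONFIG_MALI_JOB_DUMP.
 */
enum example_timeout_action {
	EXAMPLE_NO_ACTION,
	EXAMPLE_SOFT_STOP,
	EXAMPLE_HARD_STOP,
	EXAMPLE_GPU_RESET,
};

static enum example_timeout_action example_classify_ticks(u32 ticks,
		u32 soft_stop_ticks, u32 hard_stop_ticks, u32 gpu_reset_ticks)
{
	/* Exact comparisons: each action fires once, on the tick that hits
	 * the corresponding threshold, just as in timer_callback() above. */
	if (ticks == soft_stop_ticks)
		return EXAMPLE_SOFT_STOP;
	if (ticks == hard_stop_ticks)
		return EXAMPLE_HARD_STOP;
	if (ticks == gpu_reset_ticks)
		return EXAMPLE_GPU_RESET;

	return EXAMPLE_NO_ACTION;
}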
Its return value + * cannot change, because it depends on variables updated with + * the runpool_mutex held, which the caller of this must also + * hold */ + hrtimer_cancel(&backend->scheduling_timer); + } + + if (timer_callback_should_run(kbdev) && !backend->timer_running) { + /* Take spinlock to force synchronisation with timer */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + backend->timer_running = true; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + hrtimer_start(&backend->scheduling_timer, + HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns), + HRTIMER_MODE_REL); + + KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, + 0u); + } +} + +int kbase_backend_timer_init(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + + hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + backend->scheduling_timer.function = timer_callback; + + backend->timer_running = false; + + return 0; +} + +void kbase_backend_timer_term(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + + hrtimer_cancel(&backend->scheduling_timer); +} + +void kbase_backend_timer_suspend(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + + backend->suspend_timer = true; + + kbase_backend_ctx_count_changed(kbdev); +} + +void kbase_backend_timer_resume(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + + backend->suspend_timer = false; + + kbase_backend_ctx_count_changed(kbdev); +} + +void kbase_backend_timeouts_changed(struct kbase_device *kbdev) +{ + struct kbase_backend_data *backend = &kbdev->hwaccess.backend; + + backend->timeouts_updated = true; +} + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h new file mode 100644 index 00000000000000..3f53779c67471f --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h @@ -0,0 +1,69 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Register-based HW access backend specific job scheduler APIs + */ + +#ifndef _KBASE_JS_BACKEND_H_ +#define _KBASE_JS_BACKEND_H_ + +/** + * kbase_backend_timer_init() - Initialise the JS scheduling timer + * @kbdev: Device pointer + * + * This function should be called at driver initialisation + * + * Return: 0 on success + */ +int kbase_backend_timer_init(struct kbase_device *kbdev); + +/** + * kbase_backend_timer_term() - Terminate the JS scheduling timer + * @kbdev: Device pointer + * + * This function should be called at driver termination + */ +void kbase_backend_timer_term(struct kbase_device *kbdev); + +/** + * kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling + * timer + * @kbdev: Device pointer + * + * This function should be called on suspend, after the active count has reached + * zero. 
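/*
 * A self-contained sketch (not part of the patch) of the timer pattern used
 * by kbase_backend_ctx_count_changed() and timer_callback() above: the
 * callback re-arms itself only while a flag is set, the flag is flipped
 * under a spinlock so the control path and the callback are serialised, and
 * hrtimer_cancel() is called outside that spinlock because it waits for a
 * running callback (which takes the same lock) to finish. All names are
 * hypothetical; assumes <linux/hrtimer.h> and <linux/spinlock.h>.
 */
struct example_sched_timer {
	struct hrtimer timer;
	spinlock_t lock;
	bool running;
	u64 period_ns;
};

static enum hrtimer_restart example_sched_timer_cb(struct hrtimer *timer)
{
	struct example_sched_timer *st =
		container_of(timer, struct example_sched_timer, timer);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	/* ... periodic work goes here ... */
	if (st->running)
		hrtimer_start(&st->timer, ns_to_ktime(st->period_ns),
				HRTIMER_MODE_REL);
	spin_unlock_irqrestore(&st->lock, flags);

	return HRTIMER_NORESTART;
}

static void example_sched_timer_init(struct example_sched_timer *st,
		u64 period_ns)
{
	spin_lock_init(&st->lock);
	st->running = false;
	st->period_ns = period_ns;
	hrtimer_init(&st->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	st->timer.function = example_sched_timer_cb;
}

static void example_sched_timer_start(struct example_sched_timer *st)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	if (!st->running) {
		st->running = true;
		hrtimer_start(&st->timer, ns_to_ktime(st->period_ns),
				HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&st->lock, flags);
}

static void example_sched_timer_stop(struct example_sched_timer *st)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->running = false;
	spin_unlock_irqrestore(&st->lock, flags);

	/* The callback can no longer re-arm itself, so this cancel is final */
	hrtimer_cancel(&st->timer);
}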
This is required as the timer may have been started on job submission
+ * to the job scheduler, but before jobs are submitted to the GPU.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_suspend(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
+ * scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on resume. Note that it is not guaranteed to
+ * re-start the timer, only to evaluate whether it should be re-started.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_resume(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JS_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
new file mode 100644
index 00000000000000..aa1817c8bca92c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -0,0 +1,401 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+		u32 num_pages)
+{
+	u64 region;
+
+	/* can't lock a zero sized range */
+	KBASE_DEBUG_ASSERT(num_pages);
+
+	region = pfn << PAGE_SHIFT;
+	/*
+	 * fls returns (given the ASSERT above):
+	 * 1 .. 32
+	 *
+	 * 10 + fls(num_pages)
+	 * results in the range (11 .. 42)
+	 */
+
+	/* gracefully handle num_pages being zero */
+	if (0 == num_pages) {
+		region |= 11;
+	} else {
+		u8 region_width;
+
+		region_width = 10 + fls(num_pages);
+		if (num_pages != (1ul << (region_width - 11))) {
+			/* not pow2, so must go up to the next pow2 */
+			region_width += 1;
+		}
+		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+		region |= region_width;
+	}
+
+	return region;
+}
+
+static int wait_ready(struct kbase_device *kbdev,
+		unsigned int as_nr, struct kbase_context *kctx)
+{
+	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+
+	/* Wait for the MMU status to indicate there is no active command, in
+	 * case one is pending. Do not log remaining register accesses. */
+	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
+		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);
+
+	if (max_loops == 0) {
+		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+		return -1;
+	}
+
+	/* If waiting in loop was performed, log last read value.
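/*
 * A worked restatement (not part of the patch) of the width calculation in
 * lock_region() above, assuming num_pages > 0 as the ASSERT requires. The
 * width is 10 + fls(num_pages), bumped by one when num_pages is not a power
 * of two, so larger page counts map to the next power-of-two region (the
 * bracketed page counts below follow 1 << (width - 11)):
 *
 *   num_pages = 1  -> fls = 1 -> width 11                      (1 page)
 *   num_pages = 2  -> fls = 2 -> width 12                      (2 pages)
 *   num_pages = 3  -> fls = 2 -> width 12, 3 != 2, so width 13 (4 pages)
 *   num_pages = 96 -> fls = 7 -> width 17, 96 != 64, width 18  (128 pages)
 *
 * The helper is hypothetical and only restates the arithmetic; the real
 * code ORs the result into the page-aligned lock address.
 */
static u8 example_lock_region_width(u32 num_pages)
{
	u8 width = 10 + fls(num_pages);	/* fls() from <linux/bitops.h> */

	if (num_pages != (1u << (width - 11)))
		width += 1;	/* not a power of two: round the region up */

	return width;
}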
*/ + if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops) + kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx); + + return 0; +} + +static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd, + struct kbase_context *kctx) +{ + int status; + + /* write AS_COMMAND when MMU is ready to accept another command */ + status = wait_ready(kbdev, as_nr, kctx); + if (status == 0) + kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd, + kctx); + + return status; +} + +static void validate_protected_page_fault(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + /* GPUs which support (native) protected mode shall not report page + * fault addresses unless it has protected debug mode and protected + * debug mode is turned on */ + u32 protected_debug_mode = 0; + + if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) + return; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) { + protected_debug_mode = kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_STATUS), + kctx) & GPU_DBGEN; + } + + if (!protected_debug_mode) { + /* fault_addr should never be reported in protected mode. + * However, we just continue by printing an error message */ + dev_err(kbdev->dev, "Fault address reported in protected mode\n"); + } +} + +void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat) +{ + const int num_as = 16; + const int busfault_shift = MMU_PAGE_FAULT_FLAGS; + const int pf_shift = 0; + const unsigned long as_bit_mask = (1UL << num_as) - 1; + unsigned long flags; + u32 new_mask; + u32 tmp; + + /* bus faults */ + u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask; + /* page faults (note: Ignore ASes with both pf and bf) */ + u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + + /* remember current mask */ + spin_lock_irqsave(&kbdev->mmu_mask_change, flags); + new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL); + /* mask interrupts for now */ + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL); + spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); + + while (bf_bits | pf_bits) { + struct kbase_as *as; + int as_no; + struct kbase_context *kctx; + + /* + * the while logic ensures we have a bit set, no need to check + * for not-found here + */ + as_no = ffs(bf_bits | pf_bits) - 1; + as = &kbdev->as[as_no]; + + /* + * Refcount the kctx ASAP - it shouldn't disappear anyway, since + * Bus/Page faults _should_ only occur whilst jobs are running, + * and a job causing the Bus/Page fault shouldn't complete until + * the MMU is updated + */ + kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no); + + + /* find faulting address */ + as->fault_addr = kbase_reg_read(kbdev, + MMU_AS_REG(as_no, + AS_FAULTADDRESS_HI), + kctx); + as->fault_addr <<= 32; + as->fault_addr |= kbase_reg_read(kbdev, + MMU_AS_REG(as_no, + AS_FAULTADDRESS_LO), + kctx); + + /* Mark the fault protected or not */ + as->protected_mode = kbdev->protected_mode; + + if (kbdev->protected_mode && as->fault_addr) + { + /* check if address reporting is allowed */ + validate_protected_page_fault(kbdev, kctx); + } + + /* report the fault to debugfs */ + kbase_as_fault_debugfs_new(kbdev, as_no); + + /* record the fault status */ + as->fault_status = kbase_reg_read(kbdev, + MMU_AS_REG(as_no, + AS_FAULTSTATUS), + kctx); + + /* find the fault type */ + as->fault_type = (bf_bits & (1 << as_no)) ? 
+ KBASE_MMU_FAULT_TYPE_BUS : + KBASE_MMU_FAULT_TYPE_PAGE; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) { + as->fault_extra_addr = kbase_reg_read(kbdev, + MMU_AS_REG(as_no, AS_FAULTEXTRA_HI), + kctx); + as->fault_extra_addr <<= 32; + as->fault_extra_addr |= kbase_reg_read(kbdev, + MMU_AS_REG(as_no, AS_FAULTEXTRA_LO), + kctx); + } + + if (kbase_as_has_bus_fault(as)) { + /* Mark bus fault as handled. + * Note that a bus fault is processed first in case + * where both a bus fault and page fault occur. + */ + bf_bits &= ~(1UL << as_no); + + /* remove the queued BF (and PF) from the mask */ + new_mask &= ~(MMU_BUS_ERROR(as_no) | + MMU_PAGE_FAULT(as_no)); + } else { + /* Mark page fault as handled */ + pf_bits &= ~(1UL << as_no); + + /* remove the queued PF from the mask */ + new_mask &= ~MMU_PAGE_FAULT(as_no); + } + + /* Process the interrupt for this address space */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_mmu_interrupt_process(kbdev, kctx, as); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + } + + /* reenable interrupts */ + spin_lock_irqsave(&kbdev->mmu_mask_change, flags); + tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL); + new_mask |= tmp; + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL); + spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); +} + +void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx) +{ + struct kbase_mmu_setup *current_setup = &as->current_setup; + u32 transcfg = 0; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) { + transcfg = current_setup->transcfg & 0xFFFFFFFFUL; + + /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */ + /* Clear PTW_MEMATTR bits */ + transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK; + /* Enable correct PTW_MEMATTR bits */ + transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK; + + if (kbdev->system_coherency == COHERENCY_ACE) { + /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */ + /* Clear PTW_SH bits */ + transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK); + /* Enable correct PTW_SH bits */ + transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS); + } + + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO), + transcfg, kctx); + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI), + (current_setup->transcfg >> 32) & 0xFFFFFFFFUL, + kctx); + } else { + if (kbdev->system_coherency == COHERENCY_ACE) + current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER; + } + + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO), + current_setup->transtab & 0xFFFFFFFFUL, kctx); + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI), + (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx); + + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO), + current_setup->memattr & 0xFFFFFFFFUL, kctx); + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI), + (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx); + + KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as, + current_setup->transtab, + current_setup->memattr, + transcfg); + + write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx); +} + +int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op, + unsigned int handling_irq) +{ + int ret; + + lockdep_assert_held(&kbdev->mmu_hw_mutex); + + if (op == AS_COMMAND_UNLOCK) { + /* Unlock doesn't require a lock first */ + ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); + } else { + u64 lock_addr = lock_region(kbdev, vpfn, 
nr); + + /* Lock the region that needs to be updated */ + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO), + lock_addr & 0xFFFFFFFFUL, kctx); + kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI), + (lock_addr >> 32) & 0xFFFFFFFFUL, kctx); + write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx); + + /* Run the MMU operation */ + write_cmd(kbdev, as->number, op, kctx); + + /* Wait for the flush to complete */ + ret = wait_ready(kbdev, as->number, kctx); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) { + /* Issue an UNLOCK command to ensure that valid page + tables are re-read by the GPU after an update. + Note that, the FLUSH command should perform all the + actions necessary, however the bus logs show that if + multiple page faults occur within an 8 page region + the MMU does not always re-read the updated page + table entries for later faults or is only partially + read, it subsequently raises the page fault IRQ for + the same addresses, the unlock ensures that the MMU + cache is flushed, so updates can be re-read. As the + region is now unlocked we need to issue 2 UNLOCK + commands in order to flush the MMU/uTLB, + see PRLAM-8812. + */ + write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); + write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx); + } + } + + return ret; +} + +void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, enum kbase_mmu_fault_type type) +{ + unsigned long flags; + u32 pf_bf_mask; + + spin_lock_irqsave(&kbdev->mmu_mask_change, flags); + + /* + * A reset is in-flight and we're flushing the IRQ + bottom half + * so don't update anything as it could race with the reset code. + */ + if (kbdev->irq_reset_flush) + goto unlock; + + /* Clear the page (and bus fault IRQ as well in case one occurred) */ + pf_bf_mask = MMU_PAGE_FAULT(as->number); + if (type == KBASE_MMU_FAULT_TYPE_BUS || + type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED) + pf_bf_mask |= MMU_BUS_ERROR(as->number); + + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx); + +unlock: + spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); +} + +void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, enum kbase_mmu_fault_type type) +{ + unsigned long flags; + u32 irq_mask; + + /* Enable the page fault IRQ (and bus fault IRQ as well in case one + * occurred) */ + spin_lock_irqsave(&kbdev->mmu_mask_change, flags); + + /* + * A reset is in-flight and we're flushing the IRQ + bottom half + * so don't update anything as it could race with the reset code. + */ + if (kbdev->irq_reset_flush) + goto unlock; + + irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) | + MMU_PAGE_FAULT(as->number); + + if (type == KBASE_MMU_FAULT_TYPE_BUS || + type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED) + irq_mask |= MMU_BUS_ERROR(as->number); + + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx); + +unlock: + spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h new file mode 100644 index 00000000000000..c02253c6acc301 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h @@ -0,0 +1,42 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Interface file for the direct implementation for MMU hardware access + * + * Direct MMU hardware interface + * + * This module provides the interface(s) that are required by the direct + * register access implementation of the MMU hardware interface + */ + +#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_ +#define _MALI_KBASE_MMU_HW_DIRECT_H_ + +#include + +/** + * kbase_mmu_interrupt - Process an MMU interrupt. + * + * Process the MMU interrupt that was reported by the &kbase_device. + * + * @kbdev: kbase context to clear the fault from. + * @irq_stat: Value of the MMU_IRQ_STATUS register + */ +void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat); + +#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c new file mode 100644 index 00000000000000..0614348e935ac6 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c @@ -0,0 +1,63 @@ +/* + * + * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * "Always on" power management policy + */ + +#include +#include + +static u64 always_on_get_core_mask(struct kbase_device *kbdev) +{ + return kbdev->gpu_props.props.raw_props.shader_present; +} + +static bool always_on_get_core_active(struct kbase_device *kbdev) +{ + return true; +} + +static void always_on_init(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +static void always_on_term(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +/* + * The struct kbase_pm_policy structure for the demand power policy. + * + * This is the static structure that defines the demand power policy's callback + * and name. + */ +const struct kbase_pm_policy kbase_pm_always_on_policy_ops = { + "always_on", /* name */ + always_on_init, /* init */ + always_on_term, /* term */ + always_on_get_core_mask, /* get_core_mask */ + always_on_get_core_active, /* get_core_active */ + 0u, /* flags */ + KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */ +}; + +KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h new file mode 100644 index 00000000000000..f9d244b01bc235 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h @@ -0,0 +1,77 @@ + +/* + * + * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * "Always on" power management policy + */ + +#ifndef MALI_KBASE_PM_ALWAYS_ON_H +#define MALI_KBASE_PM_ALWAYS_ON_H + +/** + * DOC: + * The "Always on" power management policy has the following + * characteristics: + * + * - When KBase indicates that the GPU will be powered up, but we don't yet + * know which Job Chains are to be run: + * All Shader Cores are powered up, regardless of whether or not they will + * be needed later. + * + * - When KBase indicates that a set of Shader Cores are needed to submit the + * currently queued Job Chains: + * All Shader Cores are kept powered, regardless of whether or not they will + * be needed + * + * - When KBase indicates that the GPU need not be powered: + * The Shader Cores are kept powered, regardless of whether or not they will + * be needed. The GPU itself is also kept powered, even though it is not + * needed. + * + * This policy is automatically overridden during system suspend: the desired + * core state is ignored, and the cores are forced off regardless of what the + * policy requests. After resuming from suspend, new changes to the desired + * core state made by the policy are honored. + * + * Note: + * + * - KBase indicates the GPU will be powered up when it has a User Process that + * has just started to submit Job Chains. + * + * - KBase indicates the GPU need not be powered when all the Job Chains from + * User Processes have finished, and it is waiting for a User Process to + * submit some more Job Chains. + */ + +/** + * struct kbasep_pm_policy_always_on - Private struct for policy instance data + * @dummy: unused dummy variable + * + * This contains data that is private to the particular power policy that is + * active. + */ +struct kbasep_pm_policy_always_on { + int dummy; +}; + +extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops; + +#endif /* MALI_KBASE_PM_ALWAYS_ON_H */ + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c new file mode 100644 index 00000000000000..a871eae70f7d0c --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c @@ -0,0 +1,496 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
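/*
 * A hedged sketch (not part of the patch) of how a different power policy
 * could be declared with the same callback table used by
 * kbase_pm_always_on_policy_ops above: this illustrative variant only asks
 * for shader cores while the PM active count is non-zero. All example_*
 * names are hypothetical, the core-mask decision is deliberately
 * simplified, and the always_on policy id is reused in the last slot only
 * so the sketch sticks to identifiers that appear in this patch (a real
 * policy would define its own KBASE_PM_POLICY_ID_* value).
 */
static u64 example_demand_get_core_mask(struct kbase_device *kbdev)
{
	if (kbdev->pm.active_count > 0)
		return kbdev->gpu_props.props.raw_props.shader_present;

	return 0;
}

static bool example_demand_get_core_active(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count > 0;
}

static void example_demand_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

static void example_demand_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

const struct kbase_pm_policy example_demand_policy_ops = {
	"example_demand",		/* name */
	example_demand_init,		/* init */
	example_demand_term,		/* term */
	example_demand_get_core_mask,	/* get_core_mask */
	example_demand_get_core_active,	/* get_core_active */
	0u,				/* flags */
	KBASE_PM_POLICY_ID_ALWAYS_ON,	/* id (reused; see note above) */
};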
+ * + */ + + + + +/* + * GPU backend implementation of base kernel power management APIs + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data); + +int kbase_pm_runtime_init(struct kbase_device *kbdev) +{ + struct kbase_pm_callback_conf *callbacks; + + callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; + if (callbacks) { + kbdev->pm.backend.callback_power_on = + callbacks->power_on_callback; + kbdev->pm.backend.callback_power_off = + callbacks->power_off_callback; + kbdev->pm.backend.callback_power_suspend = + callbacks->power_suspend_callback; + kbdev->pm.backend.callback_power_resume = + callbacks->power_resume_callback; + kbdev->pm.callback_power_runtime_init = + callbacks->power_runtime_init_callback; + kbdev->pm.callback_power_runtime_term = + callbacks->power_runtime_term_callback; + kbdev->pm.backend.callback_power_runtime_on = + callbacks->power_runtime_on_callback; + kbdev->pm.backend.callback_power_runtime_off = + callbacks->power_runtime_off_callback; + kbdev->pm.backend.callback_power_runtime_idle = + callbacks->power_runtime_idle_callback; + + if (callbacks->power_runtime_init_callback) + return callbacks->power_runtime_init_callback(kbdev); + else + return 0; + } + + kbdev->pm.backend.callback_power_on = NULL; + kbdev->pm.backend.callback_power_off = NULL; + kbdev->pm.backend.callback_power_suspend = NULL; + kbdev->pm.backend.callback_power_resume = NULL; + kbdev->pm.callback_power_runtime_init = NULL; + kbdev->pm.callback_power_runtime_term = NULL; + kbdev->pm.backend.callback_power_runtime_on = NULL; + kbdev->pm.backend.callback_power_runtime_off = NULL; + kbdev->pm.backend.callback_power_runtime_idle = NULL; + + return 0; +} + +void kbase_pm_runtime_term(struct kbase_device *kbdev) +{ + if (kbdev->pm.callback_power_runtime_term) { + kbdev->pm.callback_power_runtime_term(kbdev); + } +} + +void kbase_pm_register_access_enable(struct kbase_device *kbdev) +{ + struct kbase_pm_callback_conf *callbacks; + + callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; + + if (callbacks) + callbacks->power_on_callback(kbdev); + + kbdev->pm.backend.gpu_powered = true; +} + +void kbase_pm_register_access_disable(struct kbase_device *kbdev) +{ + struct kbase_pm_callback_conf *callbacks; + + callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS; + + if (callbacks) + callbacks->power_off_callback(kbdev); + + kbdev->pm.backend.gpu_powered = false; +} + +int kbase_hwaccess_pm_init(struct kbase_device *kbdev) +{ + int ret = 0; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + mutex_init(&kbdev->pm.lock); + + kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!kbdev->pm.backend.gpu_poweroff_wait_wq) + return -ENOMEM; + + INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work, + kbase_pm_gpu_poweroff_wait_wq); + + kbdev->pm.backend.gpu_powered = false; + kbdev->pm.suspending = false; +#ifdef CONFIG_MALI_DEBUG + kbdev->pm.backend.driver_ready_for_irqs = false; +#endif /* CONFIG_MALI_DEBUG */ + kbdev->pm.backend.gpu_in_desired_state = true; + init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait); + + /* Initialise the metrics subsystem */ + ret = kbasep_pm_metrics_init(kbdev); + if (ret) + return ret; + + init_waitqueue_head(&kbdev->pm.backend.l2_powered_wait); + kbdev->pm.backend.l2_powered = 0; + + init_waitqueue_head(&kbdev->pm.backend.reset_done_wait); + 
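/*
 * A standalone sketch (not part of the patch) of the deferred power-off
 * machinery that kbase_hwaccess_pm_init() sets up around this point: a
 * dedicated high-priority workqueue runs the power-off work item, an
 * in-progress flag protected by a spinlock records that the work has been
 * queued, and waiters block on a waitqueue until the worker clears the
 * flag. All names are hypothetical; assumes <linux/workqueue.h>,
 * <linux/wait.h> and <linux/spinlock.h>.
 */
struct example_poweroff {
	struct workqueue_struct *wq;
	struct work_struct work;
	spinlock_t lock;
	bool in_progress;
	wait_queue_head_t done_wait;
};

static void example_poweroff_worker(struct work_struct *work)
{
	struct example_poweroff *po =
		container_of(work, struct example_poweroff, work);
	unsigned long flags;

	/* ... turn the clocks and interrupts off here ... */

	spin_lock_irqsave(&po->lock, flags);
	po->in_progress = false;
	spin_unlock_irqrestore(&po->lock, flags);

	wake_up(&po->done_wait);
}

static int example_poweroff_init(struct example_poweroff *po)
{
	po->wq = alloc_workqueue("example_poweroff_wait",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!po->wq)
		return -ENOMEM;

	INIT_WORK(&po->work, example_poweroff_worker);
	spin_lock_init(&po->lock);
	po->in_progress = false;
	init_waitqueue_head(&po->done_wait);

	return 0;
}

static bool example_poweroff_done(struct example_poweroff *po)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&po->lock, flags);
	done = !po->in_progress;
	spin_unlock_irqrestore(&po->lock, flags);

	return done;
}

static void example_poweroff_kick_and_wait(struct example_poweroff *po)
{
	unsigned long flags;

	spin_lock_irqsave(&po->lock, flags);
	if (!po->in_progress) {
		po->in_progress = true;
		queue_work(po->wq, &po->work);
	}
	spin_unlock_irqrestore(&po->lock, flags);

	wait_event_killable(po->done_wait, example_poweroff_done(po));
}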
kbdev->pm.backend.reset_done = false; + + init_waitqueue_head(&kbdev->pm.zero_active_count_wait); + kbdev->pm.active_count = 0; + + spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock); + spin_lock_init(&kbdev->pm.backend.gpu_powered_lock); + + init_waitqueue_head(&kbdev->pm.backend.poweroff_wait); + + if (kbase_pm_ca_init(kbdev) != 0) + goto workq_fail; + + if (kbase_pm_policy_init(kbdev) != 0) + goto pm_policy_fail; + + return 0; + +pm_policy_fail: + kbase_pm_ca_term(kbdev); +workq_fail: + kbasep_pm_metrics_term(kbdev); + return -EINVAL; +} + +void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume) +{ + lockdep_assert_held(&kbdev->pm.lock); + + /* Turn clocks and interrupts on - no-op if we haven't done a previous + * kbase_pm_clock_off() */ + kbase_pm_clock_on(kbdev, is_resume); + + /* Update core status as required by the policy */ + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START); + kbase_pm_update_cores_state(kbdev); + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END); + + /* NOTE: We don't wait to reach the desired state, since running atoms + * will wait for that state to be reached anyway */ +} + +static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data) +{ + struct kbase_device *kbdev = container_of(data, struct kbase_device, + pm.backend.gpu_poweroff_wait_work); + struct kbase_pm_device_data *pm = &kbdev->pm; + struct kbase_pm_backend_data *backend = &pm->backend; + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + unsigned long flags; + +#if !PLATFORM_POWER_DOWN_ONLY + /* Wait for power transitions to complete. We do this with no locks held + * so that we don't deadlock with any pending workqueues */ + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START); + kbase_pm_check_transitions_sync(kbdev); + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END); +#endif /* !PLATFORM_POWER_DOWN_ONLY */ + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + +#if PLATFORM_POWER_DOWN_ONLY + if (kbdev->pm.backend.gpu_powered) { + if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2)) { + /* If L2 cache is powered then we must flush it before + * we power off the GPU. Normally this would have been + * handled when the L2 was powered off. */ + kbase_gpu_cacheclean(kbdev); + } + } +#endif /* PLATFORM_POWER_DOWN_ONLY */ + + if (!backend->poweron_required) { +#if !PLATFORM_POWER_DOWN_ONLY + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + WARN_ON(kbdev->l2_available_bitmap || + kbdev->shader_available_bitmap || + kbdev->tiler_available_bitmap); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +#endif /* !PLATFORM_POWER_DOWN_ONLY */ + + /* Consume any change-state events */ + kbase_timeline_pm_check_handle_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); + + /* Disable interrupts and turn the clock off */ + if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) { + /* + * Page/bus faults are pending, must drop locks to + * process. Interrupts are disabled so no more faults + * should be generated at this point. + */ + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + kbase_flush_mmu_wqs(kbdev); + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + /* Turn off clock now that fault have been handled. 
We + * dropped locks so poweron_required may have changed - + * power back on if this is the case.*/ + if (backend->poweron_required) + kbase_pm_clock_on(kbdev, false); + else + WARN_ON(!kbase_pm_clock_off(kbdev, + backend->poweroff_is_suspend)); + } + } + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + backend->poweroff_wait_in_progress = false; + if (backend->poweron_required) { + backend->poweron_required = false; + kbase_pm_update_cores_state_nolock(kbdev); + kbase_backend_slot_update(kbdev); + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + + wake_up(&kbdev->pm.backend.poweroff_wait); +} + +void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend) +{ + unsigned long flags; + + lockdep_assert_held(&kbdev->pm.lock); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + if (!kbdev->pm.backend.poweroff_wait_in_progress) { + /* Force all cores off */ + kbdev->pm.backend.desired_shader_state = 0; + kbdev->pm.backend.desired_tiler_state = 0; + + /* Force all cores to be unavailable, in the situation where + * transitions are in progress for some cores but not others, + * and kbase_pm_check_transitions_nolock can not immediately + * power off the cores */ + kbdev->shader_available_bitmap = 0; + kbdev->tiler_available_bitmap = 0; + kbdev->l2_available_bitmap = 0; + + kbdev->pm.backend.poweroff_wait_in_progress = true; + kbdev->pm.backend.poweroff_is_suspend = is_suspend; + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + /*Kick off wq here. Callers will have to wait*/ + queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq, + &kbdev->pm.backend.gpu_poweroff_wait_work); + } else { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + } +} + +static bool is_poweroff_in_progress(struct kbase_device *kbdev) +{ + bool ret; + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + ret = (kbdev->pm.backend.poweroff_wait_in_progress == false); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return ret; +} + +void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev) +{ + wait_event_killable(kbdev->pm.backend.poweroff_wait, + is_poweroff_in_progress(kbdev)); +} + +int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev, + unsigned int flags) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + unsigned long irq_flags; + int ret; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + /* A suspend won't happen during startup/insmod */ + KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); + + /* Power up the GPU, don't enable IRQs as we are not ready to receive + * them. 
*/ + ret = kbase_pm_init_hw(kbdev, flags); + if (ret) { + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + return ret; + } + + kbasep_pm_init_core_use_bitmaps(kbdev); + + kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] = + kbdev->pm.debug_core_mask[1] = + kbdev->pm.debug_core_mask[2] = + kbdev->gpu_props.props.raw_props.shader_present; + + /* Pretend the GPU is active to prevent a power policy turning the GPU + * cores off */ + kbdev->pm.active_count = 1; + + spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, + irq_flags); + /* Ensure cycle counter is off */ + kbdev->pm.backend.gpu_cycle_counter_requests = 0; + spin_unlock_irqrestore( + &kbdev->pm.backend.gpu_cycle_counter_requests_lock, + irq_flags); + + /* We are ready to receive IRQ's now as power policy is set up, so + * enable them now. */ +#ifdef CONFIG_MALI_DEBUG + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags); + kbdev->pm.backend.driver_ready_for_irqs = true; + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags); +#endif + kbase_pm_enable_interrupts(kbdev); + + /* Turn on the GPU and any cores needed by the policy */ + kbase_pm_do_poweron(kbdev, false); + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + + /* Idle the GPU and/or cores, if the policy wants it to */ + kbase_pm_context_idle(kbdev); + + return 0; +} + +void kbase_hwaccess_pm_halt(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + mutex_lock(&kbdev->pm.lock); + kbase_pm_cancel_deferred_poweroff(kbdev); + kbase_pm_do_poweroff(kbdev, false); + mutex_unlock(&kbdev->pm.lock); +} + +KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt); + +void kbase_hwaccess_pm_term(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0); + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0); + + /* Free any resources the policy allocated */ + kbase_pm_policy_term(kbdev); + kbase_pm_ca_term(kbdev); + + /* Shut down the metrics subsystem */ + kbasep_pm_metrics_term(kbdev); + + destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq); +} + +void kbase_pm_power_changed(struct kbase_device *kbdev) +{ + bool cores_are_available; + unsigned long flags; + + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + cores_are_available = kbase_pm_check_transitions_nolock(kbdev); + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END); + + if (cores_are_available) { + /* Log timelining information that a change in state has + * completed */ + kbase_timeline_pm_handle_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); + + kbase_backend_slot_update(kbdev); + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, + u64 new_core_mask_js0, u64 new_core_mask_js1, + u64 new_core_mask_js2) +{ + kbdev->pm.debug_core_mask[0] = new_core_mask_js0; + kbdev->pm.debug_core_mask[1] = new_core_mask_js1; + kbdev->pm.debug_core_mask[2] = new_core_mask_js2; + kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 | + new_core_mask_js2; + + kbase_pm_update_cores_state_nolock(kbdev); +} + +void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev) +{ + kbase_pm_update_active(kbdev); +} + +void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev) +{ + kbase_pm_update_active(kbdev); +} + +void 
kbase_hwaccess_pm_suspend(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + /* Force power off the GPU and all cores (regardless of policy), only + * after the PM active count reaches zero (otherwise, we risk turning it + * off prematurely) */ + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + kbase_pm_cancel_deferred_poweroff(kbdev); + kbase_pm_do_poweroff(kbdev, true); + + kbase_backend_timer_suspend(kbdev); + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + + kbase_pm_wait_for_poweroff_complete(kbdev); +} + +void kbase_hwaccess_pm_resume(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + kbdev->pm.suspending = false; + kbase_pm_do_poweron(kbdev, true); + + kbase_backend_timer_resume(kbdev); + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c new file mode 100644 index 00000000000000..85890f1e85f54a --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c @@ -0,0 +1,182 @@ +/* + * + * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Base kernel core availability APIs + */ + +#include +#include +#include + +static const struct kbase_pm_ca_policy *const policy_list[] = { + &kbase_pm_ca_fixed_policy_ops, +#ifdef CONFIG_MALI_DEVFREQ + &kbase_pm_ca_devfreq_policy_ops, +#endif +#if !MALI_CUSTOMER_RELEASE + &kbase_pm_ca_random_policy_ops +#endif +}; + +/** + * POLICY_COUNT - The number of policies available in the system. + * + * This is derived from the number of functions listed in policy_list. 
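POLICY_COUNT above is the usual count-from-sizeof idiom (what the kernel elsewhere spells ARRAY_SIZE()): the number of entries in the static policy_list array falls out of sizeof arithmetic, so adding or removing a policy never requires touching the count by hand. A minimal standalone illustration with invented policy names:

/* Standalone illustration of deriving an element count from a static
 * array with sizeof, as POLICY_COUNT does for policy_list. The policy
 * type and the entries are invented for the example. */
#include <stdio.h>

struct example_policy {
    const char *name;
};

static const struct example_policy fixed_example   = { "fixed" };
static const struct example_policy devfreq_example = { "devfreq" };

static const struct example_policy *const policy_list[] = {
    &fixed_example,
    &devfreq_example,
};

#define POLICY_COUNT (sizeof(policy_list) / sizeof(*policy_list))

int main(void)
{
    size_t i;

    for (i = 0; i < POLICY_COUNT; i++)
        printf("policy %zu: %s\n", i, policy_list[i]->name);
    return 0;
}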
+ */ +#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list)) + +int kbase_pm_ca_init(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + kbdev->pm.backend.ca_current_policy = policy_list[0]; + + kbdev->pm.backend.ca_current_policy->init(kbdev); + + return 0; +} + +void kbase_pm_ca_term(struct kbase_device *kbdev) +{ + kbdev->pm.backend.ca_current_policy->term(kbdev); +} + +int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list) +{ + if (!list) + return POLICY_COUNT; + + *list = policy_list; + + return POLICY_COUNT; +} + +KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies); + +const struct kbase_pm_ca_policy +*kbase_pm_ca_get_policy(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + return kbdev->pm.backend.ca_current_policy; +} + +KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy); + +void kbase_pm_ca_set_policy(struct kbase_device *kbdev, + const struct kbase_pm_ca_policy *new_policy) +{ + const struct kbase_pm_ca_policy *old_policy; + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(new_policy != NULL); + + KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u, + new_policy->id); + + /* During a policy change we pretend the GPU is active */ + /* A suspend won't happen here, because we're in a syscall from a + * userspace thread */ + kbase_pm_context_active(kbdev); + + mutex_lock(&kbdev->pm.lock); + + /* Remove the policy to prevent IRQ handlers from working on it */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + old_policy = kbdev->pm.backend.ca_current_policy; + kbdev->pm.backend.ca_current_policy = NULL; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + if (old_policy->term) + old_policy->term(kbdev); + + if (new_policy->init) + new_policy->init(kbdev); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbdev->pm.backend.ca_current_policy = new_policy; + + /* If any core power state changes were previously attempted, but + * couldn't be made because the policy was changing (current_policy was + * NULL), then re-try them here. 
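kbase_pm_ca_set_policy() above changes policy with a small protocol: publish NULL under the IRQ-safe lock so concurrent readers fall back to safe defaults, run the old policy's term() and the new policy's init() with the lock dropped, then publish the new pointer and replay the core-state updates that were skipped in the meantime. A compact sketch of just that ordering (single-threaded, locking elided, types invented; this is not the driver's code):

/* Sketch of the NULL-while-swapping protocol used by
 * kbase_pm_ca_set_policy(): readers must tolerate a NULL policy while
 * the switch is in progress. */
#include <stddef.h>
#include <stdio.h>

struct policy {
    const char *name;
    void (*init)(void);
    void (*term)(void);
};

static const struct policy *current_policy;

/* Readers (IRQ context in the driver) fall back when the policy is NULL. */
static const char *reader_sees(void)
{
    return current_policy ? current_policy->name : "default (policy changing)";
}

static void set_policy(const struct policy *new_policy)
{
    const struct policy *old = current_policy;

    current_policy = NULL;          /* 1. hide the old policy from readers */
    if (old && old->term)
        old->term();                /* 2. tear down the old policy */
    if (new_policy->init)
        new_policy->init();         /* 3. set up the new policy */
    current_policy = new_policy;    /* 4. publish it, then replay skipped work */
}

static void noop(void) { }

int main(void)
{
    static const struct policy fixed_example = { "fixed", noop, noop };

    printf("before: %s\n", reader_sees());
    set_policy(&fixed_example);
    printf("after:  %s\n", reader_sees());
    return 0;
}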
*/ + kbase_pm_update_cores_state_nolock(kbdev); + + kbdev->pm.backend.ca_current_policy->update_core_status(kbdev, + kbdev->shader_ready_bitmap, + kbdev->shader_transitioning_bitmap); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + mutex_unlock(&kbdev->pm.lock); + + /* Now the policy change is finished, we release our fake context active + * reference */ + kbase_pm_context_idle(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy); + +u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* All cores must be enabled when instrumentation is in use */ + if (kbdev->pm.backend.instr_enabled) + return kbdev->gpu_props.props.raw_props.shader_present & + kbdev->pm.debug_core_mask_all; + + if (kbdev->pm.backend.ca_current_policy == NULL) + return kbdev->gpu_props.props.raw_props.shader_present & + kbdev->pm.debug_core_mask_all; + + return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) & + kbdev->pm.debug_core_mask_all; +} + +KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask); + +void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready, + u64 cores_transitioning) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (kbdev->pm.backend.ca_current_policy != NULL) + kbdev->pm.backend.ca_current_policy->update_core_status(kbdev, + cores_ready, + cores_transitioning); +} + +void kbase_pm_ca_instr_enable(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbdev->pm.backend.instr_enabled = true; + + kbase_pm_update_cores_state_nolock(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +void kbase_pm_ca_instr_disable(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + kbdev->pm.backend.instr_enabled = false; + + kbase_pm_update_cores_state_nolock(kbdev); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h new file mode 100644 index 00000000000000..ee9e751f2d79b6 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h @@ -0,0 +1,92 @@ +/* + * + * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* + * Base kernel core availability APIs + */ + +#ifndef _KBASE_PM_CA_H_ +#define _KBASE_PM_CA_H_ + +/** + * kbase_pm_ca_init - Initialize core availability framework + * + * Must be called before calling any other core availability function + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Return: 0 if the core availability framework was successfully initialized, + * -errno otherwise + */ +int kbase_pm_ca_init(struct kbase_device *kbdev); + +/** + * kbase_pm_ca_term - Terminate core availability framework + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_ca_term(struct kbase_device *kbdev); + +/** + * kbase_pm_ca_get_core_mask - Get currently available shaders core mask + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Returns a mask of the currently available shader cores. + * Calls into the core availability policy + * + * Return: The bit mask of available cores + */ +u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev); + +/** + * kbase_pm_ca_update_core_status - Update core status + * + * @kbdev: The kbase device structure for the device (must be + * a valid pointer) + * @cores_ready: The bit mask of cores ready for job submission + * @cores_transitioning: The bit mask of cores that are transitioning power + * state + * + * Update core availability policy with current core power status + * + * Calls into the core availability policy + */ +void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready, + u64 cores_transitioning); + +/** + * kbase_pm_ca_instr_enable - Enable override for instrumentation + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * This overrides the output of the core availability policy, ensuring that all + * cores are available + */ +void kbase_pm_ca_instr_enable(struct kbase_device *kbdev); + +/** + * kbase_pm_ca_instr_disable - Disable override for instrumentation + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * This disables any previously enabled override, and resumes normal policy + * functionality + */ +void kbase_pm_ca_instr_disable(struct kbase_device *kbdev); + +#endif /* _KBASE_PM_CA_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c new file mode 100644 index 00000000000000..66bf660cffb6ef --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c @@ -0,0 +1,129 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* + * A core availability policy implementing core mask selection from devfreq OPPs + * + */ + +#include +#include +#include +#include + +void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask) +{ + struct kbasep_pm_ca_policy_devfreq *data = + &kbdev->pm.backend.ca_policy_data.devfreq; + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + data->cores_desired = core_mask; + + /* Disable any cores that are now unwanted */ + data->cores_enabled &= data->cores_desired; + + kbdev->pm.backend.ca_in_transition = true; + + /* If there are no cores to be powered off then power on desired cores + */ + if (!(data->cores_used & ~data->cores_desired)) { + data->cores_enabled = data->cores_desired; + kbdev->pm.backend.ca_in_transition = false; + } + + kbase_pm_update_cores_state_nolock(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX %llX\n", + data->cores_desired, data->cores_enabled); +} + +static void devfreq_init(struct kbase_device *kbdev) +{ + struct kbasep_pm_ca_policy_devfreq *data = + &kbdev->pm.backend.ca_policy_data.devfreq; + + if (kbdev->current_core_mask) { + data->cores_enabled = kbdev->current_core_mask; + data->cores_desired = kbdev->current_core_mask; + } else { + data->cores_enabled = + kbdev->gpu_props.props.raw_props.shader_present; + data->cores_desired = + kbdev->gpu_props.props.raw_props.shader_present; + } + data->cores_used = 0; + kbdev->pm.backend.ca_in_transition = false; +} + +static void devfreq_term(struct kbase_device *kbdev) +{ +} + +static u64 devfreq_get_core_mask(struct kbase_device *kbdev) +{ + return kbdev->pm.backend.ca_policy_data.devfreq.cores_enabled; +} + +static void devfreq_update_core_status(struct kbase_device *kbdev, + u64 cores_ready, + u64 cores_transitioning) +{ + struct kbasep_pm_ca_policy_devfreq *data = + &kbdev->pm.backend.ca_policy_data.devfreq; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + data->cores_used = cores_ready | cores_transitioning; + + /* If in desired state then clear transition flag */ + if (data->cores_enabled == data->cores_desired) + kbdev->pm.backend.ca_in_transition = false; + + /* If all undesired cores are now off then power on desired cores. + * The direct comparison against cores_enabled limits potential + * recursion to one level */ + if (!(data->cores_used & ~data->cores_desired) && + data->cores_enabled != data->cores_desired) { + data->cores_enabled = data->cores_desired; + + kbase_pm_update_cores_state_nolock(kbdev); + + kbdev->pm.backend.ca_in_transition = false; + } +} + +/* + * The struct kbase_pm_ca_policy structure for the devfreq core availability + * policy. + * + * This is the static structure that defines the devfreq core availability power + * policy's callback and name. + */ +const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops = { + "devfreq", /* name */ + devfreq_init, /* init */ + devfreq_term, /* term */ + devfreq_get_core_mask, /* get_core_mask */ + devfreq_update_core_status, /* update_core_status */ + 0u, /* flags */ + KBASE_PM_CA_POLICY_ID_DEVFREQ, /* id */ +}; + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h new file mode 100644 index 00000000000000..7ab3cd4d846012 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h @@ -0,0 +1,55 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. 
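The devfreq availability policy above never switches masks in one jump: cores_enabled is shrunk to the intersection with the new cores_desired immediately, and only grows to the full desired mask once no core outside it is still ready or transitioning. A standalone sketch of that rule over plain 64-bit masks (driver state, locking and the update_cores_state call are left out; the names are stand-ins):

/* Sketch of the devfreq core-availability mask rule: shrink at once,
 * grow only when nothing outside the desired mask is still in use. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ca_state {
    uint64_t desired;   /* cores_desired */
    uint64_t enabled;   /* cores_enabled */
    uint64_t used;      /* cores_used: ready | transitioning */
    bool in_transition;
};

static void set_core_mask(struct ca_state *s, uint64_t new_mask)
{
    s->desired = new_mask;
    s->enabled &= s->desired;                 /* drop now-unwanted cores */
    s->in_transition = true;

    if (!(s->used & ~s->desired)) {           /* nothing unwanted still in use */
        s->enabled = s->desired;
        s->in_transition = false;
    }
}

static void update_core_status(struct ca_state *s, uint64_t ready,
                               uint64_t transitioning)
{
    s->used = ready | transitioning;

    if (s->enabled == s->desired)             /* already where we want to be */
        s->in_transition = false;

    if (!(s->used & ~s->desired) && s->enabled != s->desired) {
        s->enabled = s->desired;              /* safe to grow now */
        s->in_transition = false;
    }
}

int main(void)
{
    struct ca_state s = { .desired = 0x3, .enabled = 0x3, .used = 0x3 };

    set_core_mask(&s, 0xc);                   /* switch from cores 0-1 to 2-3 */
    printf("enabled=%#llx in_transition=%d\n",
           (unsigned long long)s.enabled, s.in_transition);

    update_core_status(&s, 0x0, 0x0);         /* cores 0-1 have powered off */
    printf("enabled=%#llx in_transition=%d\n",
           (unsigned long long)s.enabled, s.in_transition);
    return 0;
}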
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * A core availability policy for use with devfreq, where core masks are + * associated with OPPs. + */ + +#ifndef MALI_KBASE_PM_CA_DEVFREQ_H +#define MALI_KBASE_PM_CA_DEVFREQ_H + +/** + * struct kbasep_pm_ca_policy_devfreq - Private structure for devfreq ca policy + * + * This contains data that is private to the devfreq core availability + * policy. + * + * @cores_desired: Cores that the policy wants to be available + * @cores_enabled: Cores that the policy is currently returning as available + * @cores_used: Cores currently powered or transitioning + */ +struct kbasep_pm_ca_policy_devfreq { + u64 cores_desired; + u64 cores_enabled; + u64 cores_used; +}; + +extern const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops; + +/** + * kbase_devfreq_set_core_mask - Set core mask for policy to use + * @kbdev: Device pointer + * @core_mask: New core mask + * + * The new core mask will have immediate effect if the GPU is powered, or will + * take effect when it is next powered on. + */ +void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask); + +#endif /* MALI_KBASE_PM_CA_DEVFREQ_H */ + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c new file mode 100644 index 00000000000000..864612d31f9b36 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c @@ -0,0 +1,65 @@ +/* + * + * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * A power policy implementing fixed core availability + */ + +#include +#include + +static void fixed_init(struct kbase_device *kbdev) +{ + kbdev->pm.backend.ca_in_transition = false; +} + +static void fixed_term(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +static u64 fixed_get_core_mask(struct kbase_device *kbdev) +{ + return kbdev->gpu_props.props.raw_props.shader_present; +} + +static void fixed_update_core_status(struct kbase_device *kbdev, + u64 cores_ready, + u64 cores_transitioning) +{ + CSTD_UNUSED(kbdev); + CSTD_UNUSED(cores_ready); + CSTD_UNUSED(cores_transitioning); +} + +/* + * The struct kbase_pm_policy structure for the fixed power policy. + * + * This is the static structure that defines the fixed power policy's callback + * and name. 
+ */ +const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = { + "fixed", /* name */ + fixed_init, /* init */ + fixed_term, /* term */ + fixed_get_core_mask, /* get_core_mask */ + fixed_update_core_status, /* update_core_status */ + 0u, /* flags */ + KBASE_PM_CA_POLICY_ID_FIXED, /* id */ +}; + +KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h new file mode 100644 index 00000000000000..a763155cb703d8 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h @@ -0,0 +1,40 @@ +/* + * + * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * A power policy implementing fixed core availability + */ + +#ifndef MALI_KBASE_PM_CA_FIXED_H +#define MALI_KBASE_PM_CA_FIXED_H + +/** + * struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data + * + * @dummy: Dummy member - no state is needed + * + * This contains data that is private to the particular power policy that is + * active. + */ +struct kbasep_pm_ca_policy_fixed { + int dummy; +}; + +extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops; + +#endif /* MALI_KBASE_PM_CA_FIXED_H */ + diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c new file mode 100644 index 00000000000000..f891fa225a89f1 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c @@ -0,0 +1,70 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * "Coarse Demand" power management policy + */ + +#include +#include + +static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev) +{ + if (kbdev->pm.active_count == 0) + return 0; + + return kbdev->gpu_props.props.raw_props.shader_present; +} + +static bool coarse_demand_get_core_active(struct kbase_device *kbdev) +{ + if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap | + kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt + && !kbdev->tiler_inuse_cnt) + return false; + + return true; +} + +static void coarse_demand_init(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +static void coarse_demand_term(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +/* The struct kbase_pm_policy structure for the demand power policy. + * + * This is the static structure that defines the demand power policy's callback + * and name. 
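The policy tables such as kbase_pm_ca_fixed_policy_ops above use positional initializers annotated with comments. Purely for comparison, and not how the driver writes them, the same shape expressed with designated initializers, using a stand-in struct rather than the real kbase_pm_policy:

/* Stand-in ops table with designated initializers; the struct layout,
 * callbacks and id value are invented for illustration. */
#include <stdio.h>

struct example_pm_policy {
    const char *name;
    void (*init)(void);
    void (*term)(void);
    unsigned long (*get_core_mask)(void);
    int (*get_core_active)(void);
    unsigned int flags;
    int id;
};

static void cd_init(void) { }
static void cd_term(void) { }
static unsigned long cd_get_core_mask(void) { return ~0ul; } /* all cores */
static int cd_get_core_active(void) { return 1; }

static const struct example_pm_policy coarse_demand_example = {
    .name            = "coarse_demand",
    .init            = cd_init,
    .term            = cd_term,
    .get_core_mask   = cd_get_core_mask,
    .get_core_active = cd_get_core_active,
    .flags           = 0u,
    .id              = 0,   /* placeholder id, not the real enum value */
};

int main(void)
{
    printf("%s: mask=%#lx active=%d\n", coarse_demand_example.name,
           coarse_demand_example.get_core_mask(),
           coarse_demand_example.get_core_active());
    return 0;
}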
+ */ +const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = { + "coarse_demand", /* name */ + coarse_demand_init, /* init */ + coarse_demand_term, /* term */ + coarse_demand_get_core_mask, /* get_core_mask */ + coarse_demand_get_core_active, /* get_core_active */ + 0u, /* flags */ + KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */ +}; + +KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h new file mode 100644 index 00000000000000..749d305eee9a90 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h @@ -0,0 +1,64 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * "Coarse Demand" power management policy + */ + +#ifndef MALI_KBASE_PM_COARSE_DEMAND_H +#define MALI_KBASE_PM_COARSE_DEMAND_H + +/** + * DOC: + * The "Coarse" demand power management policy has the following + * characteristics: + * - When KBase indicates that the GPU will be powered up, but we don't yet + * know which Job Chains are to be run: + * - All Shader Cores are powered up, regardless of whether or not they will + * be needed later. + * - When KBase indicates that a set of Shader Cores are needed to submit the + * currently queued Job Chains: + * - All Shader Cores are kept powered, regardless of whether or not they will + * be needed + * - When KBase indicates that the GPU need not be powered: + * - The Shader Cores are powered off, and the GPU itself is powered off too. + * + * @note: + * - KBase indicates the GPU will be powered up when it has a User Process that + * has just started to submit Job Chains. + * - KBase indicates the GPU need not be powered when all the Job Chains from + * User Processes have finished, and it is waiting for a User Process to + * submit some more Job Chains. + */ + +/** + * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand + * policy + * + * This contains data that is private to the coarse demand power policy. + * + * @dummy: Dummy member - no state needed + */ +struct kbasep_pm_policy_coarse_demand { + int dummy; +}; + +extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops; + +#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h new file mode 100644 index 00000000000000..352744ee6d7301 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h @@ -0,0 +1,519 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Backend-specific Power Manager definitions + */ + +#ifndef _KBASE_PM_HWACCESS_DEFS_H_ +#define _KBASE_PM_HWACCESS_DEFS_H_ + +#include "mali_kbase_pm_ca_fixed.h" +#include "mali_kbase_pm_ca_devfreq.h" +#if !MALI_CUSTOMER_RELEASE +#include "mali_kbase_pm_ca_random.h" +#endif + +#include "mali_kbase_pm_always_on.h" +#include "mali_kbase_pm_coarse_demand.h" +#include "mali_kbase_pm_demand.h" +#if !MALI_CUSTOMER_RELEASE +#include "mali_kbase_pm_demand_always_powered.h" +#include "mali_kbase_pm_fast_start.h" +#endif + +/* Forward definition - see mali_kbase.h */ +struct kbase_device; +struct kbase_jd_atom; + +/** + * enum kbase_pm_core_type - The types of core in a GPU. + * + * These enumerated values are used in calls to + * - kbase_pm_get_present_cores() + * - kbase_pm_get_active_cores() + * - kbase_pm_get_trans_cores() + * - kbase_pm_get_ready_cores(). + * + * They specify which type of core should be acted on. These values are set in + * a manner that allows core_type_to_reg() function to be simpler and more + * efficient. + * + * @KBASE_PM_CORE_L2: The L2 cache + * @KBASE_PM_CORE_SHADER: Shader cores + * @KBASE_PM_CORE_TILER: Tiler cores + * @KBASE_PM_CORE_STACK: Core stacks + */ +enum kbase_pm_core_type { + KBASE_PM_CORE_L2 = L2_PRESENT_LO, + KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO, + KBASE_PM_CORE_TILER = TILER_PRESENT_LO, + KBASE_PM_CORE_STACK = STACK_PRESENT_LO +}; + +/** + * struct kbasep_pm_metrics_data - Metrics data collected for use by the power + * management framework. + * + * @time_period_start: time at which busy/idle measurements started + * @time_busy: number of ns the GPU was busy executing jobs since the + * @time_period_start timestamp. + * @time_idle: number of ns since time_period_start the GPU was not executing + * jobs since the @time_period_start timestamp. + * @prev_busy: busy time in ns of previous time period. + * Updated when metrics are reset. + * @prev_idle: idle time in ns of previous time period + * Updated when metrics are reset. + * @gpu_active: true when the GPU is executing jobs. false when + * not. Updated when the job scheduler informs us a job in submitted + * or removed from a GPU slot. + * @busy_cl: number of ns the GPU was busy executing CL jobs. Note that + * if two CL jobs were active for 400ns, this value would be updated + * with 800. + * @busy_gl: number of ns the GPU was busy executing GL jobs. Note that + * if two GL jobs were active for 400ns, this value would be updated + * with 800. + * @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device. + * @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As + * GL jobs never run on slot 2 this slot is not recorded. + * @lock: spinlock protecting the kbasep_pm_metrics_data structure + * @timer: timer to regularly make DVFS decisions based on the power + * management metrics. 
+ * @timer_active: boolean indicating @timer is running + * @platform_data: pointer to data controlled by platform specific code + * @kbdev: pointer to kbase device for which metrics are collected + * + */ +struct kbasep_pm_metrics_data { + ktime_t time_period_start; + u32 time_busy; + u32 time_idle; + u32 prev_busy; + u32 prev_idle; + bool gpu_active; + u32 busy_cl[2]; + u32 busy_gl; + u32 active_cl_ctx[2]; + u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */ + spinlock_t lock; + +#ifdef CONFIG_MALI_MIDGARD_DVFS + struct hrtimer timer; + bool timer_active; +#endif + + void *platform_data; + struct kbase_device *kbdev; +}; + +union kbase_pm_policy_data { + struct kbasep_pm_policy_always_on always_on; + struct kbasep_pm_policy_coarse_demand coarse_demand; + struct kbasep_pm_policy_demand demand; +#if !MALI_CUSTOMER_RELEASE + struct kbasep_pm_policy_demand_always_powered demand_always_powered; + struct kbasep_pm_policy_fast_start fast_start; +#endif +}; + +union kbase_pm_ca_policy_data { + struct kbasep_pm_ca_policy_fixed fixed; + struct kbasep_pm_ca_policy_devfreq devfreq; +#if !MALI_CUSTOMER_RELEASE + struct kbasep_pm_ca_policy_random random; +#endif +}; + +/** + * struct kbase_pm_backend_data - Data stored per device for power management. + * + * This structure contains data for the power management framework. There is one + * instance of this structure per device in the system. + * + * @ca_current_policy: The policy that is currently actively controlling core + * availability. + * @pm_current_policy: The policy that is currently actively controlling the + * power state. + * @ca_policy_data: Private data for current CA policy + * @pm_policy_data: Private data for current PM policy + * @ca_in_transition: Flag indicating when core availability policy is + * transitioning cores. The core availability policy must + * set this when a change in core availability is occurring. + * power_change_lock must be held when accessing this. + * @reset_done: Flag when a reset is complete + * @reset_done_wait: Wait queue to wait for changes to @reset_done + * @l2_powered_wait: Wait queue for whether the l2 cache has been powered as + * requested + * @l2_powered: State indicating whether all the l2 caches are powered. + * Non-zero indicates they're *all* powered + * Zero indicates that some (or all) are not powered + * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter + * users + * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests + * @desired_shader_state: A bit mask identifying the shader cores that the + * power policy would like to be on. The current state + * of the cores may be different, but there should be + * transitions in progress that will eventually achieve + * this state (assuming that the policy doesn't change + * its mind in the mean time). + * @powering_on_shader_state: A bit mask indicating which shader cores are + * currently in a power-on transition + * @desired_tiler_state: A bit mask identifying the tiler cores that the power + * policy would like to be on. 
See @desired_shader_state + * @powering_on_tiler_state: A bit mask indicating which tiler core are + * currently in a power-on transition + * @powering_on_l2_state: A bit mask indicating which l2-caches are currently + * in a power-on transition + * @powering_on_stack_state: A bit mask indicating which core stacks are + * currently in a power-on transition + * @gpu_in_desired_state: This flag is set if the GPU is powered as requested + * by the desired_xxx_state variables + * @gpu_in_desired_state_wait: Wait queue set when @gpu_in_desired_state != 0 + * @gpu_powered: Set to true when the GPU is powered and register + * accesses are possible, false otherwise + * @instr_enabled: Set to true when instrumentation is enabled, + * false otherwise + * @cg1_disabled: Set if the policy wants to keep the second core group + * powered off + * @driver_ready_for_irqs: Debug state indicating whether sufficient + * initialization of the driver has occurred to handle + * IRQs + * @gpu_powered_lock: Spinlock that must be held when writing @gpu_powered or + * accessing @driver_ready_for_irqs + * @metrics: Structure to hold metrics for the GPU + * @gpu_poweroff_pending: number of poweroff timer ticks until the GPU is + * powered off + * @shader_poweroff_pending_time: number of poweroff timer ticks until shaders + * and/or timers are powered off + * @gpu_poweroff_timer: Timer for powering off GPU + * @gpu_poweroff_wq: Workqueue to power off GPU on when timer fires + * @gpu_poweroff_work: Workitem used on @gpu_poweroff_wq + * @shader_poweroff_pending: Bit mask of shaders to be powered off on next + * timer callback + * @tiler_poweroff_pending: Bit mask of tilers to be powered off on next timer + * callback + * @poweroff_timer_needed: true if the poweroff timer is currently required, + * false otherwise + * @poweroff_timer_running: true if the poweroff timer is currently running, + * false otherwise + * power_change_lock should be held when accessing, + * unless there is no way the timer can be running (eg + * hrtimer_cancel() was called immediately before) + * @poweroff_wait_in_progress: true if a wait for GPU power off is in progress. + * hwaccess_lock must be held when accessing + * @poweron_required: true if a GPU power on is required. Should only be set + * when poweroff_wait_in_progress is true, and therefore the + * GPU can not immediately be powered on. pm.lock must be + * held when accessing + * @poweroff_is_suspend: true if the GPU is being powered off due to a suspend + * request. pm.lock must be held when accessing + * @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off + * @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq + * @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete + * @callback_power_on: Callback when the GPU needs to be turned on. See + * &struct kbase_pm_callback_conf + * @callback_power_off: Callback when the GPU may be turned off. See + * &struct kbase_pm_callback_conf + * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to + * be turned off. See &struct kbase_pm_callback_conf + * @callback_power_resume: Callback when a resume occurs and the GPU needs to + * be turned on. See &struct kbase_pm_callback_conf + * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See + * &struct kbase_pm_callback_conf + * @callback_power_runtime_off: Callback when the GPU may be turned off. 
See + * &struct kbase_pm_callback_conf + * @callback_power_runtime_idle: Optional callback when the GPU may be idle. See + * &struct kbase_pm_callback_conf + * + * Note: + * During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the + * policy is being changed with kbase_pm_ca_set_policy() or + * kbase_pm_set_policy(). The change is protected under + * kbase_device.pm.power_change_lock. Direct access to this + * from IRQ context must therefore check for NULL. If NULL, then + * kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy + * functions that would have been done under IRQ. + */ +struct kbase_pm_backend_data { + const struct kbase_pm_ca_policy *ca_current_policy; + const struct kbase_pm_policy *pm_current_policy; + union kbase_pm_ca_policy_data ca_policy_data; + union kbase_pm_policy_data pm_policy_data; + bool ca_in_transition; + bool reset_done; + wait_queue_head_t reset_done_wait; + wait_queue_head_t l2_powered_wait; + int l2_powered; + int gpu_cycle_counter_requests; + spinlock_t gpu_cycle_counter_requests_lock; + + u64 desired_shader_state; + u64 powering_on_shader_state; + u64 desired_tiler_state; + u64 powering_on_tiler_state; + u64 powering_on_l2_state; +#ifdef CONFIG_MALI_CORESTACK + u64 powering_on_stack_state; +#endif /* CONFIG_MALI_CORESTACK */ + + bool gpu_in_desired_state; + wait_queue_head_t gpu_in_desired_state_wait; + + bool gpu_powered; + + bool instr_enabled; + + bool cg1_disabled; + +#ifdef CONFIG_MALI_DEBUG + bool driver_ready_for_irqs; +#endif /* CONFIG_MALI_DEBUG */ + + spinlock_t gpu_powered_lock; + + + struct kbasep_pm_metrics_data metrics; + + int gpu_poweroff_pending; + int shader_poweroff_pending_time; + + struct hrtimer gpu_poweroff_timer; + struct workqueue_struct *gpu_poweroff_wq; + struct work_struct gpu_poweroff_work; + + u64 shader_poweroff_pending; + u64 tiler_poweroff_pending; + + bool poweroff_timer_needed; + bool poweroff_timer_running; + + bool poweroff_wait_in_progress; + bool poweron_required; + bool poweroff_is_suspend; + + struct workqueue_struct *gpu_poweroff_wait_wq; + struct work_struct gpu_poweroff_wait_work; + + wait_queue_head_t poweroff_wait; + + int (*callback_power_on)(struct kbase_device *kbdev); + void (*callback_power_off)(struct kbase_device *kbdev); + void (*callback_power_suspend)(struct kbase_device *kbdev); + void (*callback_power_resume)(struct kbase_device *kbdev); + int (*callback_power_runtime_on)(struct kbase_device *kbdev); + void (*callback_power_runtime_off)(struct kbase_device *kbdev); + int (*callback_power_runtime_idle)(struct kbase_device *kbdev); +}; + + +/* List of policy IDs */ +enum kbase_pm_policy_id { + KBASE_PM_POLICY_ID_DEMAND = 1, + KBASE_PM_POLICY_ID_ALWAYS_ON, + KBASE_PM_POLICY_ID_COARSE_DEMAND, +#if !MALI_CUSTOMER_RELEASE + KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED, + KBASE_PM_POLICY_ID_FAST_START +#endif +}; + +typedef u32 kbase_pm_policy_flags; + +/** + * struct kbase_pm_policy - Power policy structure. + * + * Each power policy exposes a (static) instance of this structure which + * contains function pointers to the policy's methods. + * + * @name: The name of this policy + * @init: Function called when the policy is selected + * @term: Function called when the policy is unselected + * @get_core_mask: Function called to get the current shader core mask + * @get_core_active: Function called to get the current overall GPU power + * state + * @flags: Field indicating flags for this policy + * @id: Field indicating an ID for this policy. 
This is not + * necessarily the same as its index in the list returned + * by kbase_pm_list_policies(). + * It is used purely for debugging. + */ +struct kbase_pm_policy { + char *name; + + /** + * Function called when the policy is selected + * + * This should initialize the kbdev->pm.pm_policy_data structure. It + * should not attempt to make any changes to hardware state. + * + * It is undefined what state the cores are in when the function is + * called. + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + */ + void (*init)(struct kbase_device *kbdev); + + /** + * Function called when the policy is unselected. + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + */ + void (*term)(struct kbase_device *kbdev); + + /** + * Function called to get the current shader core mask + * + * The returned mask should meet or exceed (kbdev->shader_needed_bitmap + * | kbdev->shader_inuse_bitmap). + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + * + * Return: The mask of shader cores to be powered + */ + u64 (*get_core_mask)(struct kbase_device *kbdev); + + /** + * Function called to get the current overall GPU power state + * + * This function should consider the state of kbdev->pm.active_count. If + * this count is greater than 0 then there is at least one active + * context on the device and the GPU should be powered. If it is equal + * to 0 then there are no active contexts and the GPU could be powered + * off if desired. + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + * + * Return: true if the GPU should be powered, false otherwise + */ + bool (*get_core_active)(struct kbase_device *kbdev); + + kbase_pm_policy_flags flags; + enum kbase_pm_policy_id id; +}; + + +enum kbase_pm_ca_policy_id { + KBASE_PM_CA_POLICY_ID_FIXED = 1, + KBASE_PM_CA_POLICY_ID_DEVFREQ, + KBASE_PM_CA_POLICY_ID_RANDOM +}; + +typedef u32 kbase_pm_ca_policy_flags; + +/** + * Maximum length of a CA policy names + */ +#define KBASE_PM_CA_MAX_POLICY_NAME_LEN 15 + +/** + * struct kbase_pm_ca_policy - Core availability policy structure. + * + * Each core availability policy exposes a (static) instance of this structure + * which contains function pointers to the policy's methods. + * + * @name: The name of this policy + * @init: Function called when the policy is selected + * @term: Function called when the policy is unselected + * @get_core_mask: Function called to get the current shader core + * availability mask + * @update_core_status: Function called to update the current core status + * @flags: Field indicating flags for this policy + * @id: Field indicating an ID for this policy. This is not + * necessarily the same as its index in the list returned + * by kbase_pm_list_policies(). + * It is used purely for debugging. + */ +struct kbase_pm_ca_policy { + char name[KBASE_PM_CA_MAX_POLICY_NAME_LEN + 1]; + + /** + * Function called when the policy is selected + * + * This should initialize the kbdev->pm.ca_policy_data structure. It + * should not attempt to make any changes to hardware state. + * + * It is undefined what state the cores are in when the function is + * called. + * + * @kbdev The kbase device structure for the device (must be a + * valid pointer) + */ + void (*init)(struct kbase_device *kbdev); + + /** + * Function called when the policy is unselected. 
+ * + * @kbdev The kbase device structure for the device (must be a + * valid pointer) + */ + void (*term)(struct kbase_device *kbdev); + + /** + * Function called to get the current shader core availability mask + * + * When a change in core availability is occurring, the policy must set + * kbdev->pm.ca_in_transition to true. This is to indicate that + * reporting changes in power state cannot be optimized out, even if + * kbdev->pm.desired_shader_state remains unchanged. This must be done + * by any functions internal to the Core Availability Policy that change + * the return value of kbase_pm_ca_policy::get_core_mask. + * + * @kbdev The kbase device structure for the device (must be a + * valid pointer) + * + * Return: The current core availability mask + */ + u64 (*get_core_mask)(struct kbase_device *kbdev); + + /** + * Function called to update the current core status + * + * If none of the cores in core group 0 are ready or transitioning, then + * the policy must ensure that the next call to get_core_mask does not + * return 0 for all cores in core group 0. It is an error to disable + * core group 0 through the core availability policy. + * + * When a change in core availability has finished, the policy must set + * kbdev->pm.ca_in_transition to false. This is to indicate that + * changes in power state can once again be optimized out when + * kbdev->pm.desired_shader_state is unchanged. + * + * @kbdev: The kbase device structure for the device + * (must be a valid pointer) + * @cores_ready: The mask of cores currently powered and + * ready to run jobs + * @cores_transitioning: The mask of cores currently transitioning + * power state + */ + void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready, + u64 cores_transitioning); + + kbase_pm_ca_policy_flags flags; + + /** + * Field indicating an ID for this policy. This is not necessarily the + * same as its index in the list returned by kbase_pm_list_policies(). + * It is used purely for debugging. + */ + enum kbase_pm_ca_policy_id id; +}; + +#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c new file mode 100644 index 00000000000000..81322fd0dd1757 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c @@ -0,0 +1,73 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/* + * A simple demand based power management policy + */ + +#include +#include + +static u64 demand_get_core_mask(struct kbase_device *kbdev) +{ + u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap; + + if (0 == kbdev->pm.active_count) + return 0; + + return desired; +} + +static bool demand_get_core_active(struct kbase_device *kbdev) +{ + if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap | + kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt + && !kbdev->tiler_inuse_cnt) + return false; + + return true; +} + +static void demand_init(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +static void demand_term(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +/* + * The struct kbase_pm_policy structure for the demand power policy. + * + * This is the static structure that defines the demand power policy's callback + * and name. + */ +const struct kbase_pm_policy kbase_pm_demand_policy_ops = { + "demand", /* name */ + demand_init, /* init */ + demand_term, /* term */ + demand_get_core_mask, /* get_core_mask */ + demand_get_core_active, /* get_core_active */ + 0u, /* flags */ + KBASE_PM_POLICY_ID_DEMAND, /* id */ +}; + +KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h new file mode 100644 index 00000000000000..c0c84b6e918914 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h @@ -0,0 +1,64 @@ +/* + * + * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * A simple demand based power management policy + */ + +#ifndef MALI_KBASE_PM_DEMAND_H +#define MALI_KBASE_PM_DEMAND_H + +/** + * DOC: Demand power management policy + * + * The demand power management policy has the following characteristics: + * - When KBase indicates that the GPU will be powered up, but we don't yet + * know which Job Chains are to be run: + * - The Shader Cores are not powered up + * + * - When KBase indicates that a set of Shader Cores are needed to submit the + * currently queued Job Chains: + * - Only those Shader Cores are powered up + * + * - When KBase indicates that the GPU need not be powered: + * - The Shader Cores are powered off, and the GPU itself is powered off too. + * + * Note: + * - KBase indicates the GPU will be powered up when it has a User Process that + * has just started to submit Job Chains. + * + * - KBase indicates the GPU need not be powered when all the Job Chains from + * User Processes have finished, and it is waiting for a User Process to + * submit some more Job Chains. + */ + +/** + * struct kbasep_pm_policy_demand - Private structure for policy instance data + * + * @dummy: No state is needed, a dummy variable + * + * This contains data that is private to the demand power policy. 
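Taken together with the coarse_demand policy earlier in this patch, the contrast is simple: with the GPU active, demand powers only shader_needed | shader_inuse, while coarse_demand powers everything in shader_present; both return an empty mask once the PM active count drops to zero. A standalone sketch of the two mask rules, with plain parameters standing in for the kbase_device fields the driver actually reads:

/* Side-by-side sketch of the demand and coarse_demand core-mask rules. */
#include <stdint.h>
#include <stdio.h>

static uint64_t coarse_demand_mask(int active_count, uint64_t shader_present)
{
    return active_count ? shader_present : 0;
}

static uint64_t demand_mask(int active_count, uint64_t shader_needed,
                            uint64_t shader_inuse)
{
    return active_count ? (shader_needed | shader_inuse) : 0;
}

int main(void)
{
    uint64_t present = 0xff;               /* 8 shader cores present */
    uint64_t needed = 0x3, inuse = 0x4;

    printf("coarse_demand: %#llx\n",
           (unsigned long long)coarse_demand_mask(1, present)); /* 0xff */
    printf("demand:        %#llx\n",
           (unsigned long long)demand_mask(1, needed, inuse));  /* 0x7 */
    return 0;
}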
+ */ +struct kbasep_pm_policy_demand { + int dummy; +}; + +extern const struct kbase_pm_policy kbase_pm_demand_policy_ops; + +#endif /* MALI_KBASE_PM_DEMAND_H */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c new file mode 100644 index 00000000000000..26802e4b72daa5 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c @@ -0,0 +1,1682 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base kernel Power Management hardware control + */ + +#include +#include +#include +#if defined(CONFIG_MALI_GATOR_SUPPORT) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if MALI_MOCK_TEST +#define MOCKABLE(function) function##_original +#else +#define MOCKABLE(function) function +#endif /* MALI_MOCK_TEST */ + +/** + * enum kbasep_pm_action - Actions that can be performed on a core. + * + * This enumeration is private to the file. Its values are set to allow + * core_type_to_reg() function, which decodes this enumeration, to be simpler + * and more efficient. + * + * @ACTION_PRESENT: The cores that are present + * @ACTION_READY: The cores that are ready + * @ACTION_PWRON: Power on the cores specified + * @ACTION_PWROFF: Power off the cores specified + * @ACTION_PWRTRANS: The cores that are transitioning + * @ACTION_PWRACTIVE: The cores that are active + */ +enum kbasep_pm_action { + ACTION_PRESENT = 0, + ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO), + ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO), + ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO), + ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO), + ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO) +}; + +static u64 kbase_pm_get_state( + struct kbase_device *kbdev, + enum kbase_pm_core_type core_type, + enum kbasep_pm_action action); + +/** + * core_type_to_reg - Decode a core type and action to a register. + * + * Given a core type (defined by kbase_pm_core_type) and an action (defined + * by kbasep_pm_action) this function will return the register offset that + * will perform the action on the core type. The register returned is the _LO + * register and an offset must be applied to use the _HI register. + * + * @core_type: The type of core + * @action: The type of action + * + * Return: The register offset of the _LO register that performs an action of + * type @action on a core of type @core_type. 
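The addressing trick documented above works because the enum values are themselves register offsets: each core type is its *_PRESENT_LO offset and each action is the distance from SHADER_PRESENT_LO to the matching SHADER_* register, so the target register is simply core_type + action, assuming every core type's register block mirrors the shader block's layout. A self-contained sketch with invented offsets (the real values come from the GPU register map, not from here):

/* Sketch of the core_type + action register addressing scheme.
 * All numeric offsets below are made up for illustration. */
#include <stdio.h>

/* Hypothetical _LO register offsets for the shader core group. */
#define EX_SHADER_PRESENT_LO 0x100
#define EX_SHADER_READY_LO   0x140
#define EX_SHADER_PWRON_LO   0x180
#define EX_SHADER_PWROFF_LO  0x1C0

#define EX_TILER_PRESENT_LO  0x110

/* Core types are *_PRESENT_LO offsets... */
enum ex_core_type {
    EX_CORE_SHADER = EX_SHADER_PRESENT_LO,
    EX_CORE_TILER  = EX_TILER_PRESENT_LO,
};

/* ...and actions are distances from the PRESENT register. */
enum ex_action {
    EX_ACTION_PRESENT = 0,
    EX_ACTION_READY   = EX_SHADER_READY_LO  - EX_SHADER_PRESENT_LO,
    EX_ACTION_PWRON   = EX_SHADER_PWRON_LO  - EX_SHADER_PRESENT_LO,
    EX_ACTION_PWROFF  = EX_SHADER_PWROFF_LO - EX_SHADER_PRESENT_LO,
};

static unsigned int ex_core_type_to_reg(enum ex_core_type type,
                                        enum ex_action action)
{
    return (unsigned int)type + (unsigned int)action;   /* no switch needed */
}

int main(void)
{
    printf("shader PWRON reg: %#x\n",
           ex_core_type_to_reg(EX_CORE_SHADER, EX_ACTION_PWRON));
    printf("tiler  READY reg: %#x\n",
           ex_core_type_to_reg(EX_CORE_TILER, EX_ACTION_READY));
    return 0;
}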
+ */ +static u32 core_type_to_reg(enum kbase_pm_core_type core_type, + enum kbasep_pm_action action) +{ +#ifdef CONFIG_MALI_CORESTACK + if (core_type == KBASE_PM_CORE_STACK) { + switch (action) { + case ACTION_PRESENT: + return STACK_PRESENT_LO; + case ACTION_READY: + return STACK_READY_LO; + case ACTION_PWRON: + return STACK_PWRON_LO; + case ACTION_PWROFF: + return STACK_PWROFF_LO; + case ACTION_PWRTRANS: + return STACK_PWRTRANS_LO; + default: + BUG(); + } + } +#endif /* CONFIG_MALI_CORESTACK */ + + return (u32)core_type + (u32)action; +} + +#ifdef CONFIG_ARM64 +static void mali_cci_flush_l2(struct kbase_device *kbdev) +{ + const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED; + u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS; + u32 raw; + + /* + * Note that we don't take the cache flush mutex here since + * we expect to be the last user of the L2, all other L2 users + * would have dropped their references, to initiate L2 power + * down, L2 power down being the only valid place for this + * to be called from. + */ + + kbase_reg_write(kbdev, + GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CLEAN_INV_CACHES, + NULL); + + raw = kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), + NULL); + + /* Wait for cache flush to complete before continuing, exit on + * gpu resets or loop expiry. */ + while (((raw & mask) == 0) && --loops) { + raw = kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), + NULL); + } +} +#endif + +/** + * kbase_pm_invoke - Invokes an action on a core set + * + * This function performs the action given by @action on a set of cores of a + * type given by @core_type. It is a static function used by + * kbase_pm_transition_core_type() + * + * @kbdev: The kbase device structure of the device + * @core_type: The type of core that the action should be performed on + * @cores: A bit mask of cores to perform the action on (low 32 bits) + * @action: The action to perform on the cores + */ +static void kbase_pm_invoke(struct kbase_device *kbdev, + enum kbase_pm_core_type core_type, + u64 cores, + enum kbasep_pm_action action) +{ + u32 reg; + u32 lo = cores & 0xFFFFFFFF; + u32 hi = (cores >> 32) & 0xFFFFFFFF; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + reg = core_type_to_reg(core_type, action); + + KBASE_DEBUG_ASSERT(reg); +#if defined(CONFIG_MALI_GATOR_SUPPORT) + if (cores) { + if (action == ACTION_PWRON) + kbase_trace_mali_pm_power_on(core_type, cores); + else if (action == ACTION_PWROFF) + kbase_trace_mali_pm_power_off(core_type, cores); + } +#endif + + if (cores) { + u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY); + + if (action == ACTION_PWRON) + state |= cores; + else if (action == ACTION_PWROFF) + state &= ~cores; + KBASE_TLSTREAM_AUX_PM_STATE(core_type, state); + } + + /* Tracing */ + if (cores) { + if (action == ACTION_PWRON) + switch (core_type) { + case KBASE_PM_CORE_SHADER: + KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u, + lo); + break; + case KBASE_PM_CORE_TILER: + KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL, + NULL, 0u, lo); + break; + case KBASE_PM_CORE_L2: + KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL, + 0u, lo); + break; + default: + break; + } + else if (action == ACTION_PWROFF) + switch (core_type) { + case KBASE_PM_CORE_SHADER: + KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL, + 0u, lo); + break; + case KBASE_PM_CORE_TILER: + KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, + NULL, 0u, lo); + break; + case KBASE_PM_CORE_L2: + KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL, + 0u, lo); + /* disable snoops before L2 is turned off */ + 
kbase_pm_cache_snoop_disable(kbdev); + break; + default: + break; + } + } + + if (lo != 0) + kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL); + + if (hi != 0) + kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL); +} + +/** + * kbase_pm_get_state - Get information about a core set + * + * This function gets information (chosen by @action) about a set of cores of + * a type given by @core_type. It is a static function used by + * kbase_pm_get_active_cores(), kbase_pm_get_trans_cores() and + * kbase_pm_get_ready_cores(). + * + * @kbdev: The kbase device structure of the device + * @core_type: The type of core that the should be queried + * @action: The property of the cores to query + * + * Return: A bit mask specifying the state of the cores + */ +static u64 kbase_pm_get_state(struct kbase_device *kbdev, + enum kbase_pm_core_type core_type, + enum kbasep_pm_action action) +{ + u32 reg; + u32 lo, hi; + + reg = core_type_to_reg(core_type, action); + + KBASE_DEBUG_ASSERT(reg); + + lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL); + hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL); + + return (((u64) hi) << 32) | ((u64) lo); +} + +void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev) +{ + kbdev->shader_inuse_bitmap = 0; + kbdev->shader_needed_bitmap = 0; + kbdev->shader_available_bitmap = 0; + kbdev->tiler_available_bitmap = 0; + kbdev->l2_users_count = 0; + kbdev->l2_available_bitmap = 0; + kbdev->tiler_needed_cnt = 0; + kbdev->tiler_inuse_cnt = 0; + + memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt)); +} + +/** + * kbase_pm_get_present_cores - Get the cores that are present + * + * @kbdev: Kbase device + * @type: The type of cores to query + * + * Return: Bitmask of the cores that are present + */ +u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + switch (type) { + case KBASE_PM_CORE_L2: + return kbdev->gpu_props.props.raw_props.l2_present; + case KBASE_PM_CORE_SHADER: + return kbdev->gpu_props.props.raw_props.shader_present; + case KBASE_PM_CORE_TILER: + return kbdev->gpu_props.props.raw_props.tiler_present; + case KBASE_PM_CORE_STACK: + return kbdev->gpu_props.props.raw_props.stack_present; + default: + break; + } + KBASE_DEBUG_ASSERT(0); + + return 0; +} + +KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores); + +/** + * kbase_pm_get_active_cores - Get the cores that are "active" + * (busy processing work) + * + * @kbdev: Kbase device + * @type: The type of cores to query + * + * Return: Bitmask of cores that are active + */ +u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type) +{ + return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE); +} + +KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores); + +/** + * kbase_pm_get_trans_cores - Get the cores that are transitioning between + * power states + * + * @kbdev: Kbase device + * @type: The type of cores to query + * + * Return: Bitmask of cores that are transitioning + */ +u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type) +{ + return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS); +} + +KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores); + +/** + * kbase_pm_get_ready_cores - Get the cores that are powered on + * + * @kbdev: Kbase device + * @type: The type of cores to query + * + * Return: Bitmask of cores that are ready (powered on) + */ +u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type) +{ 
+ u64 result; + + result = kbase_pm_get_state(kbdev, type, ACTION_READY); + + switch (type) { + case KBASE_PM_CORE_SHADER: + KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u, + (u32) result); + break; + case KBASE_PM_CORE_TILER: + KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u, + (u32) result); + break; + case KBASE_PM_CORE_L2: + KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u, + (u32) result); + break; + default: + break; + } + + return result; +} + +KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores); + +/** + * kbase_pm_transition_core_type - Perform power transitions for a particular + * core type. + * + * This function will perform any available power transitions to make the actual + * hardware state closer to the desired state. If a core is currently + * transitioning then changes to the power state of that call cannot be made + * until the transition has finished. Cores which are not present in the + * hardware are ignored if they are specified in the desired_state bitmask, + * however the return value will always be 0 in this case. + * + * @kbdev: The kbase device + * @type: The core type to perform transitions for + * @desired_state: A bit mask of the desired state of the cores + * @in_use: A bit mask of the cores that are currently running + * jobs. These cores have to be kept powered up because + * there are jobs running (or about to run) on them. + * @available: Receives a bit mask of the cores that the job + * scheduler can use to submit jobs to. May be NULL if + * this is not needed. + * @powering_on: Bit mask to update with cores that are + * transitioning to a power-on state. + * + * Return: true if the desired state has been reached, false otherwise + */ +static bool kbase_pm_transition_core_type(struct kbase_device *kbdev, + enum kbase_pm_core_type type, + u64 desired_state, + u64 in_use, + u64 * const available, + u64 *powering_on) +{ + u64 present; + u64 ready; + u64 trans; + u64 powerup; + u64 powerdown; + u64 powering_on_trans; + u64 desired_state_in_use; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* Get current state */ + present = kbase_pm_get_present_cores(kbdev, type); + trans = kbase_pm_get_trans_cores(kbdev, type); + ready = kbase_pm_get_ready_cores(kbdev, type); + /* mask off ready from trans in case transitions finished between the + * register reads */ + trans &= ~ready; + + if (trans) /* Do not progress if any cores are transitioning */ + return false; + + powering_on_trans = trans & *powering_on; + *powering_on = powering_on_trans; + + if (available != NULL) + *available = (ready | powering_on_trans) & desired_state; + + /* Update desired state to include the in-use cores. These have to be + * kept powered up because there are jobs running or about to run on + * these cores + */ + desired_state_in_use = desired_state | in_use; + + /* Update state of whether l2 caches are powered */ + if (type == KBASE_PM_CORE_L2) { + if ((ready == present) && (desired_state_in_use == ready) && + (trans == 0)) { + /* All are ready, none will be turned off, and none are + * transitioning */ + kbdev->pm.backend.l2_powered = 1; + /* + * Ensure snoops are enabled after L2 is powered up, + * note that kbase keeps track of the snoop state, so + * safe to repeatedly call. 
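One compact condition in kbase_pm_transition_core_type() above decides whether the L2 is considered fully powered: every present cache is ready, the desired-plus-in-use set asks for exactly those caches, and nothing is still transitioning. The test in isolation, over plain masks with invented names:

/* Sketch of the "is the L2 fully powered?" test: all present caches
 * ready, nothing else requested, nothing still transitioning. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool l2_fully_powered(uint64_t present, uint64_t ready,
                             uint64_t trans, uint64_t desired_in_use)
{
    return ready == present && desired_in_use == ready && trans == 0;
}

int main(void)
{
    printf("%d\n", l2_fully_powered(0x1, 0x1, 0x0, 0x1)); /* 1: powered */
    printf("%d\n", l2_fully_powered(0x1, 0x0, 0x1, 0x1)); /* 0: still coming up */
    return 0;
}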
+ */ + kbase_pm_cache_snoop_enable(kbdev); + if (kbdev->l2_users_count > 0) { + /* Notify any registered l2 cache users + * (optimized out when no users waiting) */ + wake_up(&kbdev->pm.backend.l2_powered_wait); + } + } else + kbdev->pm.backend.l2_powered = 0; + } + + if (desired_state == ready && (trans == 0)) + return true; + + /* Restrict the cores to those that are actually present */ + powerup = desired_state_in_use & present; + powerdown = (~desired_state_in_use) & present; + + /* Restrict to cores that are not already in the desired state */ + powerup &= ~ready; + powerdown &= ready; + + /* Don't transition any cores that are already transitioning, except for + * Mali cores that support the following case: + * + * If the SHADER_PWRON or TILER_PWRON registers are written to turn on + * a core that is currently transitioning to power off, then this is + * remembered and the shader core is automatically powered up again once + * the original transition completes. Once the automatic power on is + * complete any job scheduled on the shader core should start. + */ + powerdown &= ~trans; + + if (kbase_hw_has_feature(kbdev, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS)) + if (KBASE_PM_CORE_SHADER == type || KBASE_PM_CORE_TILER == type) + trans = powering_on_trans; /* for exception cases, only + * mask off cores in power on + * transitions */ + + powerup &= ~trans; + + /* Perform transitions if any */ + kbase_pm_invoke(kbdev, type, powerup, ACTION_PWRON); +#if !PLATFORM_POWER_DOWN_ONLY + kbase_pm_invoke(kbdev, type, powerdown, ACTION_PWROFF); +#endif + + /* Recalculate cores transitioning on, and re-evaluate our state */ + powering_on_trans |= powerup; + *powering_on = powering_on_trans; + if (available != NULL) + *available = (ready | powering_on_trans) & desired_state; + + return false; +} + +KBASE_EXPORT_TEST_API(kbase_pm_transition_core_type); + +/** + * get_desired_cache_status - Determine which caches should be on for a + * particular core state + * + * This function takes a bit mask of the present caches and the cores (or + * caches) that are attached to the caches that will be powered. It then + * computes which caches should be turned on to allow the cores requested to be + * powered up. 
+ * + * @present: The bit mask of present caches + * @cores_powered: A bit mask of cores (or L2 caches) that are desired to + * be powered + * @tilers_powered: The bit mask of tilers that are desired to be powered + * + * Return: A bit mask of the caches that should be turned on + */ +static u64 get_desired_cache_status(u64 present, u64 cores_powered, + u64 tilers_powered) +{ + u64 desired = 0; + + while (present) { + /* Find out which is the highest set bit */ + u64 bit = fls64(present) - 1; + u64 bit_mask = 1ull << bit; + /* Create a mask which has all bits from 'bit' upwards set */ + + u64 mask = ~(bit_mask - 1); + + /* If there are any cores powered at this bit or above (that + * haven't previously been processed) then we need this core on + */ + if (cores_powered & mask) + desired |= bit_mask; + + /* Remove bits from cores_powered and present */ + cores_powered &= ~mask; + present &= ~bit_mask; + } + + /* Power up the required L2(s) for the tiler */ + if (tilers_powered) + desired |= 1; + + return desired; +} + +KBASE_EXPORT_TEST_API(get_desired_cache_status); + +#ifdef CONFIG_MALI_CORESTACK +u64 kbase_pm_core_stack_mask(u64 cores) +{ + u64 stack_mask = 0; + size_t const MAX_CORE_ID = 31; + size_t const NUM_CORES_PER_STACK = 4; + size_t i; + + for (i = 0; i <= MAX_CORE_ID; ++i) { + if (test_bit(i, (unsigned long *)&cores)) { + /* Every core which ID >= 16 is filled to stacks 4-7 + * instead of 0-3 */ + size_t const stack_num = (i >= 16) ? + (i % NUM_CORES_PER_STACK) + 4 : + (i % NUM_CORES_PER_STACK); + set_bit(stack_num, (unsigned long *)&stack_mask); + } + } + + return stack_mask; +} +#endif /* CONFIG_MALI_CORESTACK */ + +bool +MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev) +{ + bool cores_are_available = false; + bool in_desired_state = true; + u64 desired_l2_state; +#ifdef CONFIG_MALI_CORESTACK + u64 desired_stack_state; + u64 stacks_powered; +#endif /* CONFIG_MALI_CORESTACK */ + u64 cores_powered; + u64 tilers_powered; + u64 tiler_available_bitmap; + u64 tiler_transitioning_bitmap; + u64 shader_available_bitmap; + u64 shader_ready_bitmap; + u64 shader_transitioning_bitmap; + u64 l2_available_bitmap; + u64 prev_l2_available_bitmap; + u64 l2_inuse_bitmap; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + lockdep_assert_held(&kbdev->hwaccess_lock); + + spin_lock(&kbdev->pm.backend.gpu_powered_lock); + if (kbdev->pm.backend.gpu_powered == false) { + spin_unlock(&kbdev->pm.backend.gpu_powered_lock); + if (kbdev->pm.backend.desired_shader_state == 0 && + kbdev->pm.backend.desired_tiler_state == 0) + return true; + return false; + } + + /* Trace that a change-state is being requested, and that it took + * (effectively) no time to start it. 
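To make the cache-selection walk in get_desired_cache_status() above concrete, here is a standalone sketch of the same bit logic with a small worked input; my_fls64() is a local stand-in for the kernel's fls64(), and the numbers are purely illustrative.

#include <stdint.h>
#include <stdio.h>

static int my_fls64(uint64_t v)		/* 1-based index of the highest set bit, 0 if none */
{
	int n = 0;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

static uint64_t desired_cache_status(uint64_t present, uint64_t cores_powered,
				     uint64_t tilers_powered)
{
	uint64_t desired = 0;

	while (present) {
		uint64_t bit = my_fls64(present) - 1;
		uint64_t bit_mask = 1ull << bit;
		/* all bits from 'bit' upwards */
		uint64_t mask = ~(bit_mask - 1);

		if (cores_powered & mask)
			desired |= bit_mask;

		cores_powered &= ~mask;
		present &= ~bit_mask;
	}

	if (tilers_powered)
		desired |= 1;	/* the tiler always needs L2 slice 0 */

	return desired;
}

int main(void)
{
	/* Caches present at bits 0 and 3, cores powered at bits 4-5, tiler on:
	 * the walk selects cache 3 (cores above it) and cache 0 (tiler). */
	printf("0x%llx\n", (unsigned long long)
	       desired_cache_status(0x9, 0x30, 1));	/* prints 0x9 */
	return 0;
}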
This is useful for counting how + * many state changes occurred, in a way that's backwards-compatible + * with processing the trace data */ + kbase_timeline_pm_send_event(kbdev, + KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE); + kbase_timeline_pm_handle_event(kbdev, + KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE); + + /* If any cores are already powered then, we must keep the caches on */ + shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev, + KBASE_PM_CORE_SHADER); + cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER); + cores_powered |= kbdev->pm.backend.desired_shader_state; + +#ifdef CONFIG_MALI_CORESTACK + /* Work out which core stacks want to be powered */ + desired_stack_state = kbase_pm_core_stack_mask(cores_powered); + stacks_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_STACK) | + desired_stack_state; +#endif /* CONFIG_MALI_CORESTACK */ + + /* Work out which tilers want to be powered */ + tiler_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev, + KBASE_PM_CORE_TILER); + tilers_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_TILER); + tilers_powered |= kbdev->pm.backend.desired_tiler_state; + + /* If there are l2 cache users registered, keep all l2s powered even if + * all other cores are off. */ + if (kbdev->l2_users_count > 0) + cores_powered |= kbdev->gpu_props.props.raw_props.l2_present; + + desired_l2_state = get_desired_cache_status( + kbdev->gpu_props.props.raw_props.l2_present, + cores_powered, tilers_powered); + + l2_inuse_bitmap = get_desired_cache_status( + kbdev->gpu_props.props.raw_props.l2_present, + cores_powered | shader_transitioning_bitmap, + tilers_powered | tiler_transitioning_bitmap); + +#ifdef CONFIG_MALI_CORESTACK + if (stacks_powered) + desired_l2_state |= 1; +#endif /* CONFIG_MALI_CORESTACK */ + + /* If any l2 cache is on, then enable l2 #0, for use by job manager */ + if (0 != desired_l2_state) + desired_l2_state |= 1; + + prev_l2_available_bitmap = kbdev->l2_available_bitmap; + in_desired_state &= kbase_pm_transition_core_type(kbdev, + KBASE_PM_CORE_L2, desired_l2_state, l2_inuse_bitmap, + &l2_available_bitmap, + &kbdev->pm.backend.powering_on_l2_state); + + if (kbdev->l2_available_bitmap != l2_available_bitmap) + KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap); + + kbdev->l2_available_bitmap = l2_available_bitmap; + + +#ifdef CONFIG_MALI_CORESTACK + if (in_desired_state) { + in_desired_state &= kbase_pm_transition_core_type(kbdev, + KBASE_PM_CORE_STACK, desired_stack_state, 0, + &kbdev->stack_available_bitmap, + &kbdev->pm.backend.powering_on_stack_state); + } +#endif /* CONFIG_MALI_CORESTACK */ + + if (in_desired_state) { + in_desired_state &= kbase_pm_transition_core_type(kbdev, + KBASE_PM_CORE_TILER, + kbdev->pm.backend.desired_tiler_state, + 0, &tiler_available_bitmap, + &kbdev->pm.backend.powering_on_tiler_state); + in_desired_state &= kbase_pm_transition_core_type(kbdev, + KBASE_PM_CORE_SHADER, + kbdev->pm.backend.desired_shader_state, + kbdev->shader_inuse_bitmap, + &shader_available_bitmap, + &kbdev->pm.backend.powering_on_shader_state); + + if (kbdev->shader_available_bitmap != shader_available_bitmap) { + KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, + NULL, 0u, + (u32) shader_available_bitmap); + KBASE_TIMELINE_POWER_SHADER(kbdev, + shader_available_bitmap); + } + + kbdev->shader_available_bitmap = shader_available_bitmap; + + if (kbdev->tiler_available_bitmap != tiler_available_bitmap) { + KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, + NULL, NULL, 0u, + (u32) 
tiler_available_bitmap); + KBASE_TIMELINE_POWER_TILER(kbdev, + tiler_available_bitmap); + } + + kbdev->tiler_available_bitmap = tiler_available_bitmap; + + } else if ((l2_available_bitmap & + kbdev->gpu_props.props.raw_props.tiler_present) != + kbdev->gpu_props.props.raw_props.tiler_present) { + tiler_available_bitmap = 0; + + if (kbdev->tiler_available_bitmap != tiler_available_bitmap) + KBASE_TIMELINE_POWER_TILER(kbdev, + tiler_available_bitmap); + + kbdev->tiler_available_bitmap = tiler_available_bitmap; + } + + /* State updated for slow-path waiters */ + kbdev->pm.backend.gpu_in_desired_state = in_desired_state; + + shader_ready_bitmap = kbase_pm_get_ready_cores(kbdev, + KBASE_PM_CORE_SHADER); + shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev, + KBASE_PM_CORE_SHADER); + + /* Determine whether the cores are now available (even if the set of + * available cores is empty). Note that they can be available even if + * we've not finished transitioning to the desired state */ + if ((kbdev->shader_available_bitmap & + kbdev->pm.backend.desired_shader_state) + == kbdev->pm.backend.desired_shader_state && + (kbdev->tiler_available_bitmap & + kbdev->pm.backend.desired_tiler_state) + == kbdev->pm.backend.desired_tiler_state) { + cores_are_available = true; + + KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u, + (u32)(kbdev->shader_available_bitmap & + kbdev->pm.backend.desired_shader_state)); + KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u, + (u32)(kbdev->tiler_available_bitmap & + kbdev->pm.backend.desired_tiler_state)); + + /* Log timelining information about handling events that power + * up cores, to match up either with immediate submission either + * because cores already available, or from PM IRQ */ + if (!in_desired_state) + kbase_timeline_pm_send_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); + } + + if (in_desired_state) { + KBASE_DEBUG_ASSERT(cores_are_available); + +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_pm_status(KBASE_PM_CORE_L2, + kbase_pm_get_ready_cores(kbdev, + KBASE_PM_CORE_L2)); + kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER, + kbase_pm_get_ready_cores(kbdev, + KBASE_PM_CORE_SHADER)); + kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER, + kbase_pm_get_ready_cores(kbdev, + KBASE_PM_CORE_TILER)); +#ifdef CONFIG_MALI_CORESTACK + kbase_trace_mali_pm_status(KBASE_PM_CORE_STACK, + kbase_pm_get_ready_cores(kbdev, + KBASE_PM_CORE_STACK)); +#endif /* CONFIG_MALI_CORESTACK */ +#endif + + KBASE_TLSTREAM_AUX_PM_STATE( + KBASE_PM_CORE_L2, + kbase_pm_get_ready_cores( + kbdev, KBASE_PM_CORE_L2)); + KBASE_TLSTREAM_AUX_PM_STATE( + KBASE_PM_CORE_SHADER, + kbase_pm_get_ready_cores( + kbdev, KBASE_PM_CORE_SHADER)); + KBASE_TLSTREAM_AUX_PM_STATE( + KBASE_PM_CORE_TILER, + kbase_pm_get_ready_cores( + kbdev, + KBASE_PM_CORE_TILER)); +#ifdef CONFIG_MALI_CORESTACK + KBASE_TLSTREAM_AUX_PM_STATE( + KBASE_PM_CORE_STACK, + kbase_pm_get_ready_cores( + kbdev, + KBASE_PM_CORE_STACK)); +#endif /* CONFIG_MALI_CORESTACK */ + + KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL, + kbdev->pm.backend.gpu_in_desired_state, + (u32)kbdev->pm.backend.desired_shader_state); + KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u, + (u32)kbdev->pm.backend.desired_tiler_state); + + /* Log timelining information for synchronous waiters */ + kbase_timeline_pm_send_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); + /* Wake slow-path waiters. Job scheduler does not use this. 
*/ + KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0); + + wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait); + } + + spin_unlock(&kbdev->pm.backend.gpu_powered_lock); + + /* kbase_pm_ca_update_core_status can cause one-level recursion into + * this function, so it must only be called once all changes to kbdev + * have been committed, and after the gpu_powered_lock has been + * dropped. */ + if (kbdev->shader_ready_bitmap != shader_ready_bitmap || + kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) { + kbdev->shader_ready_bitmap = shader_ready_bitmap; + kbdev->shader_transitioning_bitmap = + shader_transitioning_bitmap; + + kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap, + shader_transitioning_bitmap); + } + + /* The core availability policy is not allowed to keep core group 0 + * turned off (unless it was changing the l2 power state) */ + if (!((shader_ready_bitmap | shader_transitioning_bitmap) & + kbdev->gpu_props.props.coherency_info.group[0].core_mask) && + (prev_l2_available_bitmap == desired_l2_state) && + !(kbase_pm_ca_get_core_mask(kbdev) & + kbdev->gpu_props.props.coherency_info.group[0].core_mask)) + BUG(); + + /* The core availability policy is allowed to keep core group 1 off, + * but all jobs specifically targeting CG1 must fail */ + if (!((shader_ready_bitmap | shader_transitioning_bitmap) & + kbdev->gpu_props.props.coherency_info.group[1].core_mask) && + !(kbase_pm_ca_get_core_mask(kbdev) & + kbdev->gpu_props.props.coherency_info.group[1].core_mask)) + kbdev->pm.backend.cg1_disabled = true; + else + kbdev->pm.backend.cg1_disabled = false; + + return cores_are_available; +} +KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_nolock); + +/* Timeout for kbase_pm_check_transitions_sync when wait_event_killable has + * aborted due to a fatal signal. If the time spent waiting has exceeded this + * threshold then there is most likely a hardware issue. */ +#define PM_TIMEOUT (5*HZ) /* 5s */ + +void kbase_pm_check_transitions_sync(struct kbase_device *kbdev) +{ + unsigned long flags; + unsigned long timeout; + bool cores_are_available; + int ret; + + /* Force the transition to be checked and reported - the cores may be + * 'available' (for job submission) but not fully powered up. 
*/ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + cores_are_available = kbase_pm_check_transitions_nolock(kbdev); + + /* Don't need 'cores_are_available', because we don't return anything */ + CSTD_UNUSED(cores_are_available); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + timeout = jiffies + PM_TIMEOUT; + + /* Wait for cores */ + ret = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait, + kbdev->pm.backend.gpu_in_desired_state); + + if (ret < 0 && time_after(jiffies, timeout)) { + dev_err(kbdev->dev, "Power transition timed out unexpectedly\n"); + dev_err(kbdev->dev, "Desired state :\n"); + dev_err(kbdev->dev, "\tShader=%016llx\n", + kbdev->pm.backend.desired_shader_state); + dev_err(kbdev->dev, "\tTiler =%016llx\n", + kbdev->pm.backend.desired_tiler_state); + dev_err(kbdev->dev, "Current state :\n"); + dev_err(kbdev->dev, "\tShader=%08x%08x\n", + kbase_reg_read(kbdev, + GPU_CONTROL_REG(SHADER_READY_HI), NULL), + kbase_reg_read(kbdev, + GPU_CONTROL_REG(SHADER_READY_LO), + NULL)); + dev_err(kbdev->dev, "\tTiler =%08x%08x\n", + kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_READY_HI), NULL), + kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_READY_LO), NULL)); + dev_err(kbdev->dev, "\tL2 =%08x%08x\n", + kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_READY_HI), NULL), + kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_READY_LO), NULL)); + dev_err(kbdev->dev, "Cores transitioning :\n"); + dev_err(kbdev->dev, "\tShader=%08x%08x\n", + kbase_reg_read(kbdev, GPU_CONTROL_REG( + SHADER_PWRTRANS_HI), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG( + SHADER_PWRTRANS_LO), NULL)); + dev_err(kbdev->dev, "\tTiler =%08x%08x\n", + kbase_reg_read(kbdev, GPU_CONTROL_REG( + TILER_PWRTRANS_HI), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG( + TILER_PWRTRANS_LO), NULL)); + dev_err(kbdev->dev, "\tL2 =%08x%08x\n", + kbase_reg_read(kbdev, GPU_CONTROL_REG( + L2_PWRTRANS_HI), NULL), + kbase_reg_read(kbdev, GPU_CONTROL_REG( + L2_PWRTRANS_LO), NULL)); +#if KBASE_GPU_RESET_EN + dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n"); + if (kbase_prepare_to_reset_gpu(kbdev)) + kbase_reset_gpu(kbdev); +#endif /* KBASE_GPU_RESET_EN */ + } else { + /* Log timelining information that a change in state has + * completed */ + kbase_timeline_pm_handle_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); + } +} +KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync); + +void kbase_pm_enable_interrupts(struct kbase_device *kbdev) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + /* + * Clear all interrupts, + * and unmask them all. + */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, + NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL, + NULL); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, + NULL); + kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL); + + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL); + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL); +} + +KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts); + +void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(NULL != kbdev); + /* + * Mask all interrupts, + * and clear them all. 
+ */ + lockdep_assert_held(&kbdev->hwaccess_lock); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL, + NULL); + kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL); + kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF, + NULL); + + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL); + kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL); +} + +void kbase_pm_disable_interrupts(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_pm_disable_interrupts_nolock(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts); + + +/* + * pmu layout: + * 0x0000: PMU TAG (RO) (0xCAFECAFE) + * 0x0004: PMU VERSION ID (RO) (0x00000000) + * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE) + */ +void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume) +{ + bool reset_required = is_resume; + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + unsigned long flags; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + lockdep_assert_held(&js_devdata->runpool_mutex); + lockdep_assert_held(&kbdev->pm.lock); + + if (kbdev->pm.backend.gpu_powered) { + /* Already turned on */ + if (kbdev->poweroff_pending) + kbase_pm_enable_interrupts(kbdev); + kbdev->poweroff_pending = false; + KBASE_DEBUG_ASSERT(!is_resume); + return; + } + + kbdev->poweroff_pending = false; + + KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u); + + if (is_resume && kbdev->pm.backend.callback_power_resume) { + kbdev->pm.backend.callback_power_resume(kbdev); + return; + } else if (kbdev->pm.backend.callback_power_on) { + kbdev->pm.backend.callback_power_on(kbdev); + /* If your platform properly keeps the GPU state you may use the + * return value of the callback_power_on function to + * conditionally reset the GPU on power up. Currently we are + * conservative and always reset the GPU. */ + reset_required = true; + } + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); + kbdev->pm.backend.gpu_powered = true; + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (reset_required) { + /* GPU state was lost, reset GPU to ensure it is in a + * consistent state */ + kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS); + } + + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_ctx_sched_restore_all_as(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + /* Lastly, enable the interrupts */ + kbase_pm_enable_interrupts(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_clock_on); + +bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + lockdep_assert_held(&kbdev->pm.lock); + + /* ASSERT that the cores should now be unavailable. No lock needed. */ + KBASE_DEBUG_ASSERT(kbdev->shader_available_bitmap == 0u); + + kbdev->poweroff_pending = true; + + if (!kbdev->pm.backend.gpu_powered) { + /* Already turned off */ + if (is_suspend && kbdev->pm.backend.callback_power_suspend) + kbdev->pm.backend.callback_power_suspend(kbdev); + return true; + } + + KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u); + + /* Disable interrupts. 
This also clears any outstanding interrupts */ + kbase_pm_disable_interrupts(kbdev); + /* Ensure that any IRQ handlers have finished */ + kbase_synchronize_irqs(kbdev); + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (atomic_read(&kbdev->faults_pending)) { + /* Page/bus faults are still being processed. The GPU can not + * be powered off until they have completed */ + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, + flags); + return false; + } + + kbase_pm_cache_snoop_disable(kbdev); + + /* The GPU power may be turned off from this point */ + kbdev->pm.backend.gpu_powered = false; + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags); + + if (is_suspend && kbdev->pm.backend.callback_power_suspend) + kbdev->pm.backend.callback_power_suspend(kbdev); + else if (kbdev->pm.backend.callback_power_off) + kbdev->pm.backend.callback_power_off(kbdev); + return true; +} + +KBASE_EXPORT_TEST_API(kbase_pm_clock_off); + +struct kbasep_reset_timeout_data { + struct hrtimer timer; + bool timed_out; + struct kbase_device *kbdev; +}; + +void kbase_pm_reset_done(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + kbdev->pm.backend.reset_done = true; + wake_up(&kbdev->pm.backend.reset_done_wait); +} + +/** + * kbase_pm_wait_for_reset - Wait for a reset to happen + * + * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state. + * + * @kbdev: Kbase device + */ +static void kbase_pm_wait_for_reset(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->pm.lock); + + wait_event(kbdev->pm.backend.reset_done_wait, + (kbdev->pm.backend.reset_done)); + kbdev->pm.backend.reset_done = false; +} + +KBASE_EXPORT_TEST_API(kbase_pm_reset_done); + +static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer) +{ + struct kbasep_reset_timeout_data *rtdata = + container_of(timer, struct kbasep_reset_timeout_data, timer); + + rtdata->timed_out = 1; + + /* Set the wait queue to wake up kbase_pm_init_hw even though the reset + * hasn't completed */ + kbase_pm_reset_done(rtdata->kbdev); + + return HRTIMER_NORESTART; +} + +static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev) +{ + struct device_node *np = kbdev->dev->of_node; + u32 jm_values[4]; + const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> + GPU_ID_VERSION_PRODUCT_ID_SHIFT; + const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >> + GPU_ID_VERSION_MAJOR_SHIFT; + + kbdev->hw_quirks_sc = 0; + + /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443. + * and needed due to MIDGLES-3539. See PRLAM-11035 */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) || + kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035)) + kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE; + + /* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327. + */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327)) + kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD; + +#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY + /* Enable alternative hardware counter selection if configured. */ + if (!GPU_ID_IS_NEW_FORMAT(prod_id)) + kbdev->hw_quirks_sc |= SC_ALT_COUNTERS; +#endif + + /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. 
*/ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797)) + kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS; + + if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) { + if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */ + kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE; + else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */ + kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES; + } + + if (!kbdev->hw_quirks_sc) + kbdev->hw_quirks_sc = kbase_reg_read(kbdev, + GPU_CONTROL_REG(SHADER_CONFIG), NULL); + + kbdev->hw_quirks_tiler = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TILER_CONFIG), NULL); + + /* Set tiler clock gate override if required */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953)) + kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE; + + /* Limit the GPU bus bandwidth if the platform needs this. */ + kbdev->hw_quirks_mmu = kbase_reg_read(kbdev, + GPU_CONTROL_REG(L2_MMU_CONFIG), NULL); + + + /* Limit read & write ID width for AXI */ + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG)) { + kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS); + kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_ARID_LIMIT & 0x7) << + L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT; + + kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES); + kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_AWID_LIMIT & 0x7) << + L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT; + } else { + kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS); + kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) << + L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT; + + kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES); + kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) << + L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT; + } + + if (kbdev->system_coherency == COHERENCY_ACE) { + /* Allow memory configuration disparity to be ignored, we + * optimize the use of shared memory and thus we expect + * some disparity in the memory configuration */ + kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY; + } + + kbdev->hw_quirks_jm = 0; + /* Only for T86x/T88x-based products after r2p0 */ + if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) { + + if (of_property_read_u32_array(np, + "jm_config", + &jm_values[0], + ARRAY_SIZE(jm_values))) { + /* Entry not in device tree, use defaults */ + jm_values[0] = 0; + jm_values[1] = 0; + jm_values[2] = 0; + jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT; + } + + /* Limit throttle limit to 6 bits*/ + if (jm_values[3] > JM_MAX_JOB_THROTTLE_LIMIT) { + dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63)."); + jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT; + } + + /* Aggregate to one integer. */ + kbdev->hw_quirks_jm |= (jm_values[0] ? + JM_TIMESTAMP_OVERRIDE : 0); + kbdev->hw_quirks_jm |= (jm_values[1] ? + JM_CLOCK_GATE_OVERRIDE : 0); + kbdev->hw_quirks_jm |= (jm_values[2] ? + JM_JOB_THROTTLE_ENABLE : 0); + kbdev->hw_quirks_jm |= (jm_values[3] << + JM_JOB_THROTTLE_LIMIT_SHIFT); + + } else if (GPU_ID_IS_NEW_FORMAT(prod_id) && + (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == + GPU_ID2_PRODUCT_TMIX)) { + /* Only for tMIx */ + u32 coherency_features; + + coherency_features = kbase_reg_read(kbdev, + GPU_CONTROL_REG(COHERENCY_FEATURES), NULL); + + /* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly + * documented for tMIx so force correct value here. 
+ */ + if (coherency_features == + COHERENCY_FEATURE_BIT(COHERENCY_ACE)) { + kbdev->hw_quirks_jm |= + (COHERENCY_ACE_LITE | COHERENCY_ACE) << + JM_FORCE_COHERENCY_FEATURES_SHIFT; + } + } + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING)) + kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE; + + if (!kbdev->hw_quirks_jm) + kbdev->hw_quirks_jm = kbase_reg_read(kbdev, + GPU_CONTROL_REG(JM_CONFIG), NULL); + +#ifdef CONFIG_MALI_CORESTACK +#define MANUAL_POWER_CONTROL ((u32)(1 << 8)) + kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL; +#endif /* CONFIG_MALI_CORESTACK */ +} + +static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev) +{ + kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), + kbdev->hw_quirks_sc, NULL); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG), + kbdev->hw_quirks_tiler, NULL); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), + kbdev->hw_quirks_mmu, NULL); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG), + kbdev->hw_quirks_jm, NULL); + +} + +void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev) +{ + if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) && + !kbdev->cci_snoop_enabled) { +#ifdef CONFIG_ARM64 + if (kbdev->snoop_enable_smc != 0) + kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0); +#endif /* CONFIG_ARM64 */ + dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n"); + kbdev->cci_snoop_enabled = true; + } +} + +void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev) +{ + if (kbdev->cci_snoop_enabled) { +#ifdef CONFIG_ARM64 + if (kbdev->snoop_disable_smc != 0) { + mali_cci_flush_l2(kbdev); + kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0); + } +#endif /* CONFIG_ARM64 */ + dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n"); + kbdev->cci_snoop_enabled = false; + } +} + +static int kbase_pm_do_reset(struct kbase_device *kbdev) +{ + struct kbasep_reset_timeout_data rtdata; + + KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0); + + KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_SOFT_RESET, NULL); + + /* Unmask the reset complete interrupt only */ + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED, + NULL); + + /* Initialize a structure for tracking the status of the reset */ + rtdata.kbdev = kbdev; + rtdata.timed_out = 0; + + /* Create a timer to use as a timeout on the reset */ + hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rtdata.timer.function = kbasep_reset_timeout; + + hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), + HRTIMER_MODE_REL); + + /* Wait for the RESET_COMPLETED interrupt to be raised */ + kbase_pm_wait_for_reset(kbdev); + + if (rtdata.timed_out == 0) { + /* GPU has been reset */ + hrtimer_cancel(&rtdata.timer); + destroy_hrtimer_on_stack(&rtdata.timer); + return 0; + } + + /* No interrupt has been received - check if the RAWSTAT register says + * the reset has completed */ + if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) & + RESET_COMPLETED) { + /* The interrupt is set in the RAWSTAT; this suggests that the + * interrupts are not getting to the CPU */ + dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n"); + /* If interrupts aren't working we can't continue. 
*/ + destroy_hrtimer_on_stack(&rtdata.timer); + return -EINVAL; + } + + /* The GPU doesn't seem to be responding to the reset so try a hard + * reset */ + dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n", + RESET_TIMEOUT); + KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_HARD_RESET, NULL); + + /* Restart the timer to wait for the hard reset to complete */ + rtdata.timed_out = 0; + + hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), + HRTIMER_MODE_REL); + + /* Wait for the RESET_COMPLETED interrupt to be raised */ + kbase_pm_wait_for_reset(kbdev); + + if (rtdata.timed_out == 0) { + /* GPU has been reset */ + hrtimer_cancel(&rtdata.timer); + destroy_hrtimer_on_stack(&rtdata.timer); + return 0; + } + + destroy_hrtimer_on_stack(&rtdata.timer); + + dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n", + RESET_TIMEOUT); + + return -EINVAL; +} + +static int kbasep_protected_mode_enable(struct protected_mode_device *pdev) +{ + struct kbase_device *kbdev = pdev->data; + + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_SET_PROTECTED_MODE, NULL); + return 0; +} + +static int kbasep_protected_mode_disable(struct protected_mode_device *pdev) +{ + struct kbase_device *kbdev = pdev->data; + + lockdep_assert_held(&kbdev->pm.lock); + + return kbase_pm_do_reset(kbdev); +} + +struct protected_mode_ops kbase_native_protected_ops = { + .protected_mode_enable = kbasep_protected_mode_enable, + .protected_mode_disable = kbasep_protected_mode_disable +}; + +int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags) +{ + unsigned long irq_flags; + int err; + bool resume_vinstr = false; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + lockdep_assert_held(&kbdev->pm.lock); + + /* Ensure the clock is on before attempting to access the hardware */ + if (!kbdev->pm.backend.gpu_powered) { + if (kbdev->pm.backend.callback_power_on) + kbdev->pm.backend.callback_power_on(kbdev); + + spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, + irq_flags); + kbdev->pm.backend.gpu_powered = true; + spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, + irq_flags); + } + + /* Ensure interrupts are off to begin with, this also clears any + * outstanding interrupts */ + kbase_pm_disable_interrupts(kbdev); + /* Ensure cache snoops are disabled before reset. 
*/ + kbase_pm_cache_snoop_disable(kbdev); + /* Prepare for the soft-reset */ + kbdev->pm.backend.reset_done = false; + + /* The cores should be made unavailable due to the reset */ + spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags); + if (kbdev->shader_available_bitmap != 0u) + KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, + NULL, 0u, (u32)0u); + if (kbdev->tiler_available_bitmap != 0u) + KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, + NULL, NULL, 0u, (u32)0u); + kbdev->shader_available_bitmap = 0u; + kbdev->tiler_available_bitmap = 0u; + kbdev->l2_available_bitmap = 0u; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags); + + /* Soft reset the GPU */ + if (kbdev->protected_mode_support) + err = kbdev->protected_ops->protected_mode_disable( + kbdev->protected_dev); + else + err = kbase_pm_do_reset(kbdev); + + spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags); + if (kbdev->protected_mode) + resume_vinstr = true; + kbdev->protected_mode = false; + kbase_ipa_model_use_configured_locked(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags); + + if (err) + goto exit; + + if (flags & PM_HW_ISSUES_DETECT) + kbase_pm_hw_issues_detect(kbdev); + + kbase_pm_hw_issues_apply(kbdev); + kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency); + + /* Sanity check protected mode was left after reset */ + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) { + u32 gpu_status = kbase_reg_read(kbdev, + GPU_CONTROL_REG(GPU_STATUS), NULL); + + WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE); + } + + /* If cycle counter was in use re-enable it, enable_irqs will only be + * false when called from kbase_pm_powerup */ + if (kbdev->pm.backend.gpu_cycle_counter_requests && + (flags & PM_ENABLE_IRQS)) { + /* enable interrupts as the L2 may have to be powered on */ + kbase_pm_enable_interrupts(kbdev); + kbase_pm_request_l2_caches(kbdev); + + /* Re-enable the counters if we need to */ + spin_lock_irqsave( + &kbdev->pm.backend.gpu_cycle_counter_requests_lock, + irq_flags); + if (kbdev->pm.backend.gpu_cycle_counter_requests) + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CYCLE_COUNT_START, NULL); + spin_unlock_irqrestore( + &kbdev->pm.backend.gpu_cycle_counter_requests_lock, + irq_flags); + + spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags); + kbase_pm_release_l2_caches(kbdev); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags); + + kbase_pm_disable_interrupts(kbdev); + } + + if (flags & PM_ENABLE_IRQS) + kbase_pm_enable_interrupts(kbdev); + +exit: + /* If GPU is leaving protected mode resume vinstr operation. 
*/ + if (kbdev->vinstr_ctx && resume_vinstr) + kbase_vinstr_resume(kbdev->vinstr_ctx); + + return err; +} + +/** + * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters + * + * Increase the count of cycle counter users and turn the cycle counters on if + * they were previously off + * + * This function is designed to be called by + * kbase_pm_request_gpu_cycle_counter() or + * kbase_pm_request_gpu_cycle_counter_l2_is_on() only + * + * When this function is called the l2 cache must be on and the l2 cache users + * count must have been incremented by a call to ( + * kbase_pm_request_l2_caches() or kbase_pm_request_l2_caches_l2_on() ) + * + * @kbdev: The kbase device structure of the device + */ +static void +kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, + flags); + + ++kbdev->pm.backend.gpu_cycle_counter_requests; + + if (1 == kbdev->pm.backend.gpu_cycle_counter_requests) + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CYCLE_COUNT_START, NULL); + + spin_unlock_irqrestore( + &kbdev->pm.backend.gpu_cycle_counter_requests_lock, + flags); +} + +void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests < + INT_MAX); + + kbase_pm_request_l2_caches(kbdev); + + kbase_pm_request_gpu_cycle_counter_do_request(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter); + +void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered); + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests < + INT_MAX); + + kbase_pm_request_l2_caches_l2_is_on(kbdev); + + kbase_pm_request_gpu_cycle_counter_do_request(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on); + +void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, + flags); + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0); + + --kbdev->pm.backend.gpu_cycle_counter_requests; + + if (0 == kbdev->pm.backend.gpu_cycle_counter_requests) + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), + GPU_COMMAND_CYCLE_COUNT_STOP, NULL); + + spin_unlock_irqrestore( + &kbdev->pm.backend.gpu_cycle_counter_requests_lock, + flags); + + kbase_pm_release_l2_caches(kbdev); +} + +void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kbase_pm_release_gpu_cycle_counter_nolock(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h new file mode 100644 index 00000000000000..7b778239dfa3c5 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h @@ -0,0 +1,563 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Power management API definitions used internally by GPU backend + */ + +#ifndef _KBASE_BACKEND_PM_INTERNAL_H_ +#define _KBASE_BACKEND_PM_INTERNAL_H_ + +#include + +#include "mali_kbase_pm_ca.h" +#include "mali_kbase_pm_policy.h" + + +/** + * kbase_pm_dev_idle - The GPU is idle. + * + * The OS may choose to turn off idle devices + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_dev_idle(struct kbase_device *kbdev); + +/** + * kbase_pm_dev_activate - The GPU is active. + * + * The OS should avoid opportunistically turning off the GPU while it is active + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_dev_activate(struct kbase_device *kbdev); + +/** + * kbase_pm_get_present_cores - Get details of the cores that are present in + * the device. + * + * This function can be called by the active power policy to return a bitmask of + * the cores (of a specified type) present in the GPU device and also a count of + * the number of cores. + * + * @kbdev: The kbase device structure for the device (must be a valid + * pointer) + * @type: The type of core (see the enum kbase_pm_core_type enumeration) + * + * Return: The bit mask of cores present + */ +u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type); + +/** + * kbase_pm_get_active_cores - Get details of the cores that are currently + * active in the device. + * + * This function can be called by the active power policy to return a bitmask of + * the cores (of a specified type) that are actively processing work (i.e. + * turned on *and* busy). + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @type: The type of core (see the enum kbase_pm_core_type enumeration) + * + * Return: The bit mask of active cores + */ +u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type); + +/** + * kbase_pm_get_trans_cores - Get details of the cores that are currently + * transitioning between power states. + * + * This function can be called by the active power policy to return a bitmask of + * the cores (of a specified type) that are currently transitioning between + * power states. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @type: The type of core (see the enum kbase_pm_core_type enumeration) + * + * Return: The bit mask of transitioning cores + */ +u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type); + +/** + * kbase_pm_get_ready_cores - Get details of the cores that are currently + * powered and ready for jobs. + * + * This function can be called by the active power policy to return a bitmask of + * the cores (of a specified type) that are powered and ready for jobs (they may + * or may not be currently executing jobs). 
+ * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @type: The type of core (see the enum kbase_pm_core_type enumeration) + * + * Return: The bit mask of ready cores + */ +u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, + enum kbase_pm_core_type type); + +/** + * kbase_pm_clock_on - Turn the clock for the device on, and enable device + * interrupts. + * + * This function can be used by a power policy to turn the clock for the GPU on. + * It should be modified during integration to perform the necessary actions to + * ensure that the GPU is fully powered and clocked. + * + * @kbdev: The kbase device structure for the device (must be a valid + * pointer) + * @is_resume: true if clock on due to resume after suspend, false otherwise + */ +void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume); + +/** + * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the + * device off. + * + * This function can be used by a power policy to turn the clock for the GPU + * off. It should be modified during integration to perform the necessary + * actions to turn the clock off (if this is possible in the integration). + * + * @kbdev: The kbase device structure for the device (must be a valid + * pointer) + * @is_suspend: true if clock off due to suspend, false otherwise + * + * Return: true if clock was turned off, or + * false if clock can not be turned off due to pending page/bus fault + * workers. Caller must flush MMU workqueues and retry + */ +bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend); + +/** + * kbase_pm_enable_interrupts - Enable interrupts on the device. + * + * Interrupts are also enabled after a call to kbase_pm_clock_on(). + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_enable_interrupts(struct kbase_device *kbdev); + +/** + * kbase_pm_disable_interrupts - Disable interrupts on the device. + * + * This prevents delivery of Power Management interrupts to the CPU so that + * kbase_pm_check_transitions_nolock() will not be called from the IRQ handler + * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called. + * + * Interrupts are also disabled after a call to kbase_pm_clock_off(). + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_disable_interrupts(struct kbase_device *kbdev); + +/** + * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts() + * that does not take the hwaccess_lock + * + * Caller must hold the hwaccess_lock. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev); + +/** + * kbase_pm_init_hw - Initialize the hardware. + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @flags: Flags specifying the type of PM init + * + * This function checks the GPU ID register to ensure that the GPU is supported + * by the driver and performs a reset on the device so that it is in a known + * state before the device is used. + * + * Return: 0 if the device is supported and successfully reset. + */ +int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags); + +/** + * kbase_pm_reset_done - The GPU has been reset successfully. + * + * This function must be called by the GPU interrupt handler when the + * RESET_COMPLETED bit is set. 
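A minimal sketch of how a caller might combine the core-state query helpers above; kbase_example_cores_pending() is a hypothetical helper, not part of the driver, and assumes the usual kbase headers.

static u64 kbase_example_cores_pending(struct kbase_device *kbdev,
				       enum kbase_pm_core_type type,
				       u64 desired)
{
	/* Desired cores that are neither powered nor already transitioning up */
	u64 ready = kbase_pm_get_ready_cores(kbdev, type);
	u64 trans = kbase_pm_get_trans_cores(kbdev, type);

	return desired & ~(ready | trans);
}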
It signals to the power management initialization + * code that the GPU has been successfully reset. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_reset_done(struct kbase_device *kbdev); + + +/** + * kbase_pm_check_transitions_nolock - Check if there are any power transitions + * to make, and if so start them. + * + * This function will check the desired_xx_state members of + * struct kbase_pm_device_data and the actual status of the hardware to see if + * any power transitions can be made at this time to make the hardware state + * closer to the state desired by the power policy. + * + * The return value can be used to check whether all the desired cores are + * available, and so whether it's worth submitting a job (e.g. from a Power + * Management IRQ). + * + * Note that this still returns true when desired_xx_state has no + * cores. That is: of the no cores desired, none were *un*available. In + * this case, the caller may still need to try submitting jobs. This is because + * the Core Availability Policy might have taken us to an intermediate state + * where no cores are powered, before powering on more cores (e.g. for core + * rotation) + * + * The caller must hold kbase_device.pm.power_change_lock + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Return: non-zero when all desired cores are available. That is, + * it's worthwhile for the caller to submit a job. + * false otherwise + */ +bool kbase_pm_check_transitions_nolock(struct kbase_device *kbdev); + +/** + * kbase_pm_check_transitions_sync - Synchronous and locking variant of + * kbase_pm_check_transitions_nolock() + * + * On returning, the desired state at the time of the call will have been met. + * + * There is nothing to stop the core being switched off by calls to + * kbase_pm_release_cores() or kbase_pm_unrequest_cores(). Therefore, the + * caller must have already made a call to + * kbase_pm_request_cores()/kbase_pm_request_cores_sync() previously. + * + * The usual use-case for this is to ensure cores are 'READY' after performing + * a GPU Reset. + * + * Unlike kbase_pm_check_transitions_nolock(), the caller must not hold + * kbase_device.pm.power_change_lock, because this function will take that + * lock itself. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_check_transitions_sync(struct kbase_device *kbdev); + +/** + * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state() + * where the caller must hold + * kbase_device.pm.power_change_lock + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev); + +/** + * kbase_pm_update_cores_state - Update the desired state of shader cores from + * the Power Policy, and begin any power + * transitions. + * + * This function will update the desired_xx_state members of + * struct kbase_pm_device_data by calling into the current Power Policy. It will + * then begin power transitions to make the hardware acheive the desired shader + * core state. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_update_cores_state(struct kbase_device *kbdev); + +/** + * kbase_pm_cancel_deferred_poweroff - Cancel any pending requests to power off + * the GPU and/or shader cores. + * + * This should be called by any functions which directly power off the GPU. 
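A sketch of a caller on the PM IRQ path using kbase_pm_check_transitions_nolock() as described above; kbase_example_pm_irq_tail() is hypothetical, and the lock assumption follows the lockdep assertion in the implementation (hwaccess_lock).

static void kbase_example_pm_irq_tail(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbase_pm_check_transitions_nolock(kbdev)) {
		/* All desired cores are available, so it is worth kicking the
		 * job scheduler now instead of waiting for the next event. */
	}
}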
+ * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev); + +/** + * kbasep_pm_init_core_use_bitmaps - Initialise data tracking the required + * and used cores. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev); + +/** + * kbasep_pm_metrics_init - Initialize the metrics gathering framework. + * + * This must be called before other metric gathering APIs are called. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Return: 0 on success, error code on error + */ +int kbasep_pm_metrics_init(struct kbase_device *kbdev); + +/** + * kbasep_pm_metrics_term - Terminate the metrics gathering framework. + * + * This must be called when metric gathering is no longer required. It is an + * error to call any metrics gathering function (other than + * kbasep_pm_metrics_init()) after calling this function. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbasep_pm_metrics_term(struct kbase_device *kbdev); + +/** + * kbase_pm_report_vsync - Function to be called by the frame buffer driver to + * update the vsync metric. + * + * This function should be called by the frame buffer driver to update whether + * the system is hitting the vsync target or not. buffer_updated should be true + * if the vsync corresponded with a new frame being displayed, otherwise it + * should be false. This function does not need to be called every vsync, but + * only when the value of @buffer_updated differs from a previous call. + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + * @buffer_updated: True if the buffer has been updated on this VSync, + * false otherwise + */ +void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated); + +/** + * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change + * the clock speed of the GPU. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * This function should be called regularly by the DVFS system to check whether + * the clock speed of the GPU needs updating. + */ +void kbase_pm_get_dvfs_action(struct kbase_device *kbdev); + +/** + * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is + * needed + * + * If the caller is the first caller then the GPU cycle counters will be enabled + * along with the l2 cache + * + * The GPU must be powered when calling this function (i.e. + * kbase_pm_context_active() must have been called). + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev); + +/** + * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is + * needed (l2 cache already on) + * + * This is a version of the above function + * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the + * l2 cache is known to be on and assured to be on until the subsequent call of + * kbase_pm_release_gpu_cycle_counter() such as when a job is submitted. It does + * not sleep and can be called from atomic functions. + * + * The GPU must be powered when calling this function (i.e. + * kbase_pm_context_active() must have been called) and the l2 cache must be + * powered on. 
+ * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev); + +/** + * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no + * longer in use + * + * If the caller is the last caller then the GPU cycle counters will be + * disabled. A request must have been made before a call to this. + * + * Caller must not hold the hwaccess_lock, as it will be taken in this function. + * If the caller is already holding this lock then + * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev); + +/** + * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter() + * that does not take hwaccess_lock + * + * Caller must hold the hwaccess_lock. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev); + +/** + * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to + * complete + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev); + +/** + * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device + * + * Setup the power management callbacks and initialize/enable the runtime-pm + * for the Mali GPU platform device, using the callback function. This must be + * called before the kbase_pm_register_access_enable() function. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +int kbase_pm_runtime_init(struct kbase_device *kbdev); + +/** + * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_runtime_term(struct kbase_device *kbdev); + +/** + * kbase_pm_register_access_enable - Enable access to GPU registers + * + * Enables access to the GPU registers before power management has powered up + * the GPU with kbase_pm_powerup(). + * + * This results in the power management callbacks provided in the driver + * configuration to get called to turn on power and/or clocks to the GPU. See + * kbase_pm_callback_conf. + * + * This should only be used before power management is powered up with + * kbase_pm_powerup() + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_register_access_enable(struct kbase_device *kbdev); + +/** + * kbase_pm_register_access_disable - Disable early register access + * + * Disables access to the GPU registers enabled earlier by a call to + * kbase_pm_register_access_enable(). + * + * This results in the power management callbacks provided in the driver + * configuration to get called to turn off power and/or clocks to the GPU. See + * kbase_pm_callback_conf + * + * This should only be used before power management is powered up with + * kbase_pm_powerup() + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_register_access_disable(struct kbase_device *kbdev); + +/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline + * function */ + +/** + * kbase_pm_metrics_is_active - Check if the power management metrics + * collection is active. 
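A minimal usage sketch of the cycle-counter request/release pairing documented above; kbase_example_sample_cycles() is a hypothetical profiling helper and assumes the GPU is already powered (kbase_pm_context_active() held by the caller).

static void kbase_example_sample_cycles(struct kbase_device *kbdev)
{
	/* The first user powers the L2 and starts the cycle counter */
	kbase_pm_request_gpu_cycle_counter(kbdev);

	/* ... read the CYCLE_COUNT registers here ... */

	/* The last user stops the counter and releases the L2 again */
	kbase_pm_release_gpu_cycle_counter(kbdev);
}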
+ * + * Note that this returns if the power management metrics collection was + * active at the time of calling, it is possible that after the call the metrics + * collection enable may have changed state. + * + * The caller must handle the consequence that the state may have changed. + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * Return: true if metrics collection was active else false. + */ +bool kbase_pm_metrics_is_active(struct kbase_device *kbdev); + +/** + * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested. + * + * @kbdev: The kbase device structure for the device (must be a valid + * pointer) + * @is_resume: true if power on due to resume after suspend, + * false otherwise + */ +void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume); + +/** + * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been + * requested. + * + * @kbdev: The kbase device structure for the device (must be a valid + * pointer) + * @is_suspend: true if power off due to suspend, + * false otherwise + */ +void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend); + +#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) +void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, + unsigned long *total, unsigned long *busy); +void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev); +#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */ + +#ifdef CONFIG_MALI_MIDGARD_DVFS + +/** + * kbase_platform_dvfs_event - Report utilisation to DVFS code + * + * Function provided by platform specific code when DVFS is enabled to allow + * the power management metrics system to report utilisation. + * + * @kbdev: The kbase device structure for the device (must be a + * valid pointer) + * @utilisation: The current calculated utilisation by the metrics system. + * @util_gl_share: The current calculated gl share of utilisation. + * @util_cl_share: The current calculated cl share of utilisation per core + * group. + * Return: Returns 0 on failure and non zero on success. + */ + +int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation, + u32 util_gl_share, u32 util_cl_share[2]); +#endif + +void kbase_pm_power_changed(struct kbase_device *kbdev); + +/** + * kbase_pm_metrics_update - Inform the metrics system that an atom is either + * about to be run or has just completed. + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @now: Pointer to the timestamp of the change, or NULL to use current time + * + * Caller must hold hwaccess_lock + */ +void kbase_pm_metrics_update(struct kbase_device *kbdev, + ktime_t *now); + +/** + * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU + * If the GPU does not have coherency this is a no-op + * @kbdev: Device pointer + * + * This function should be called after L2 power up. + */ + +void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev); + +/** + * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU + * If the GPU does not have coherency this is a no-op + * @kbdev: Device pointer + * + * This function should be called before L2 power off. 
+ */ +void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev); + +#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c new file mode 100644 index 00000000000000..024248ca712341 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c @@ -0,0 +1,401 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Metrics for power management + */ + +#include +#include +#include +#include + +/* When VSync is being hit aim for utilisation between 70-90% */ +#define KBASE_PM_VSYNC_MIN_UTILISATION 70 +#define KBASE_PM_VSYNC_MAX_UTILISATION 90 +/* Otherwise aim for 10-40% */ +#define KBASE_PM_NO_VSYNC_MIN_UTILISATION 10 +#define KBASE_PM_NO_VSYNC_MAX_UTILISATION 40 + +/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns + * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly + * under 11s. Exceeding this will cause overflow */ +#define KBASE_PM_TIME_SHIFT 8 + +/* Maximum time between sampling of utilization data, without resetting the + * counters. */ +#define MALI_UTILIZATION_MAX_PERIOD 100000 /* ns = 100ms */ + +#ifdef CONFIG_MALI_MIDGARD_DVFS +static enum hrtimer_restart dvfs_callback(struct hrtimer *timer) +{ + unsigned long flags; + struct kbasep_pm_metrics_data *metrics; + + KBASE_DEBUG_ASSERT(timer != NULL); + + metrics = container_of(timer, struct kbasep_pm_metrics_data, timer); + kbase_pm_get_dvfs_action(metrics->kbdev); + + spin_lock_irqsave(&metrics->lock, flags); + + if (metrics->timer_active) + hrtimer_start(timer, + HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period), + HRTIMER_MODE_REL); + + spin_unlock_irqrestore(&metrics->lock, flags); + + return HRTIMER_NORESTART; +} +#endif /* CONFIG_MALI_MIDGARD_DVFS */ + +int kbasep_pm_metrics_init(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + kbdev->pm.backend.metrics.kbdev = kbdev; + + kbdev->pm.backend.metrics.time_period_start = ktime_get(); + kbdev->pm.backend.metrics.time_busy = 0; + kbdev->pm.backend.metrics.time_idle = 0; + kbdev->pm.backend.metrics.prev_busy = 0; + kbdev->pm.backend.metrics.prev_idle = 0; + kbdev->pm.backend.metrics.gpu_active = false; + kbdev->pm.backend.metrics.active_cl_ctx[0] = 0; + kbdev->pm.backend.metrics.active_cl_ctx[1] = 0; + kbdev->pm.backend.metrics.active_gl_ctx[0] = 0; + kbdev->pm.backend.metrics.active_gl_ctx[1] = 0; + kbdev->pm.backend.metrics.busy_cl[0] = 0; + kbdev->pm.backend.metrics.busy_cl[1] = 0; + kbdev->pm.backend.metrics.busy_gl = 0; + + spin_lock_init(&kbdev->pm.backend.metrics.lock); + +#ifdef CONFIG_MALI_MIDGARD_DVFS + kbdev->pm.backend.metrics.timer_active = true; + hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + kbdev->pm.backend.metrics.timer.function = dvfs_callback; + + hrtimer_start(&kbdev->pm.backend.metrics.timer, + HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period), + HRTIMER_MODE_REL); +#endif /* CONFIG_MALI_MIDGARD_DVFS */ + + return 0; +} + 
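/*
 * Editor's illustration (not part of the ARM patch): a minimal, self-contained
 * sketch of the KBASE_PM_TIME_SHIFT bookkeeping used throughout this file.
 * Busy/idle deltas are accumulated in units of (1 << 8) ns, so a u32 counter
 * gives roughly 2^(32+8)/100 ns (just under 11 s) of headroom before the
 * "100 * busy / total" percentage calculation could overflow. Plain C types
 * are used here instead of the kbase structures; nothing below is driver API.
 */
#include <stdint.h>
#include <stdio.h>

#define TIME_SHIFT 8	/* mirrors KBASE_PM_TIME_SHIFT above */

static uint32_t ns_to_units(uint64_t ns)
{
	return (uint32_t)(ns >> TIME_SHIFT);
}

int main(void)
{
	uint32_t time_busy = 0, time_idle = 0;

	/* Pretend the GPU was busy for 40 ms and idle for 60 ms of a sample. */
	time_busy += ns_to_units(40ULL * 1000 * 1000);
	time_idle += ns_to_units(60ULL * 1000 * 1000);

	/* Same percentage formula as kbase_pm_get_dvfs_utilisation_old(). */
	printf("utilisation = %u%%\n",
	       (100 * time_busy) / (time_busy + time_idle));	/* prints 40% */
	return 0;
}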
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init); + +void kbasep_pm_metrics_term(struct kbase_device *kbdev) +{ +#ifdef CONFIG_MALI_MIDGARD_DVFS + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + kbdev->pm.backend.metrics.timer_active = false; + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); + + hrtimer_cancel(&kbdev->pm.backend.metrics.timer); +#endif /* CONFIG_MALI_MIDGARD_DVFS */ +} + +KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term); + +/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this + * function + */ +static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev, + ktime_t now) +{ + ktime_t diff; + + lockdep_assert_held(&kbdev->pm.backend.metrics.lock); + + diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start); + if (ktime_to_ns(diff) < 0) + return; + + if (kbdev->pm.backend.metrics.gpu_active) { + u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT); + + kbdev->pm.backend.metrics.time_busy += ns_time; + if (kbdev->pm.backend.metrics.active_cl_ctx[0]) + kbdev->pm.backend.metrics.busy_cl[0] += ns_time; + if (kbdev->pm.backend.metrics.active_cl_ctx[1]) + kbdev->pm.backend.metrics.busy_cl[1] += ns_time; + if (kbdev->pm.backend.metrics.active_gl_ctx[0]) + kbdev->pm.backend.metrics.busy_gl += ns_time; + if (kbdev->pm.backend.metrics.active_gl_ctx[1]) + kbdev->pm.backend.metrics.busy_gl += ns_time; + } else { + kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff) + >> KBASE_PM_TIME_SHIFT); + } + + kbdev->pm.backend.metrics.time_period_start = now; +} + +#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) +/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this + * function. 
+ */ +static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev, + ktime_t now) +{ + /* Store previous value */ + kbdev->pm.backend.metrics.prev_idle = + kbdev->pm.backend.metrics.time_idle; + kbdev->pm.backend.metrics.prev_busy = + kbdev->pm.backend.metrics.time_busy; + + /* Reset current values */ + kbdev->pm.backend.metrics.time_period_start = now; + kbdev->pm.backend.metrics.time_idle = 0; + kbdev->pm.backend.metrics.time_busy = 0; + kbdev->pm.backend.metrics.busy_cl[0] = 0; + kbdev->pm.backend.metrics.busy_cl[1] = 0; + kbdev->pm.backend.metrics.busy_gl = 0; +} + +void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get()); + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); +} + +void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, + unsigned long *total_out, unsigned long *busy_out) +{ + ktime_t now = ktime_get(); + unsigned long flags, busy, total; + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + kbase_pm_get_dvfs_utilisation_calc(kbdev, now); + + busy = kbdev->pm.backend.metrics.time_busy; + total = busy + kbdev->pm.backend.metrics.time_idle; + + /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default + * 100ms) */ + if (total >= MALI_UTILIZATION_MAX_PERIOD) { + kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now); + } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) { + total += kbdev->pm.backend.metrics.prev_idle + + kbdev->pm.backend.metrics.prev_busy; + busy += kbdev->pm.backend.metrics.prev_busy; + } + + *total_out = total; + *busy_out = busy; + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); +} +#endif + +#ifdef CONFIG_MALI_MIDGARD_DVFS + +/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this + * function + */ +int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev, + int *util_gl_share, + int util_cl_share[2], + ktime_t now) +{ + int utilisation; + int busy; + + kbase_pm_get_dvfs_utilisation_calc(kbdev, now); + + if (kbdev->pm.backend.metrics.time_idle + + kbdev->pm.backend.metrics.time_busy == 0) { + /* No data - so we return NOP */ + utilisation = -1; + if (util_gl_share) + *util_gl_share = -1; + if (util_cl_share) { + util_cl_share[0] = -1; + util_cl_share[1] = -1; + } + goto out; + } + + utilisation = (100 * kbdev->pm.backend.metrics.time_busy) / + (kbdev->pm.backend.metrics.time_idle + + kbdev->pm.backend.metrics.time_busy); + + busy = kbdev->pm.backend.metrics.busy_gl + + kbdev->pm.backend.metrics.busy_cl[0] + + kbdev->pm.backend.metrics.busy_cl[1]; + + if (busy != 0) { + if (util_gl_share) + *util_gl_share = + (100 * kbdev->pm.backend.metrics.busy_gl) / + busy; + if (util_cl_share) { + util_cl_share[0] = + (100 * kbdev->pm.backend.metrics.busy_cl[0]) / + busy; + util_cl_share[1] = + (100 * kbdev->pm.backend.metrics.busy_cl[1]) / + busy; + } + } else { + if (util_gl_share) + *util_gl_share = -1; + if (util_cl_share) { + util_cl_share[0] = -1; + util_cl_share[1] = -1; + } + } + +out: + return utilisation; +} + +void kbase_pm_get_dvfs_action(struct kbase_device *kbdev) +{ + unsigned long flags; + int utilisation, util_gl_share; + int util_cl_share[2]; + ktime_t now; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + + now = ktime_get(); + + utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share, + util_cl_share, now); + + if 
(utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 || + util_cl_share[1] < 0) { + utilisation = 0; + util_gl_share = 0; + util_cl_share[0] = 0; + util_cl_share[1] = 0; + goto out; + } + +out: +#ifdef CONFIG_MALI_MIDGARD_DVFS + kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share, + util_cl_share); +#endif /*CONFIG_MALI_MIDGARD_DVFS */ + + kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now); + + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); +} + +bool kbase_pm_metrics_is_active(struct kbase_device *kbdev) +{ + bool isactive; + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + isactive = kbdev->pm.backend.metrics.timer_active; + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); + + return isactive; +} +KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active); + +#endif /* CONFIG_MALI_MIDGARD_DVFS */ + +/** + * kbase_pm_metrics_active_calc - Update PM active counts based on currently + * running atoms + * @kbdev: Device pointer + * + * The caller must hold kbdev->pm.backend.metrics.lock + */ +static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev) +{ + int js; + + lockdep_assert_held(&kbdev->pm.backend.metrics.lock); + + kbdev->pm.backend.metrics.active_gl_ctx[0] = 0; + kbdev->pm.backend.metrics.active_gl_ctx[1] = 0; + kbdev->pm.backend.metrics.active_cl_ctx[0] = 0; + kbdev->pm.backend.metrics.active_cl_ctx[1] = 0; + kbdev->pm.backend.metrics.gpu_active = false; + + for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) { + struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0); + + /* Head atom may have just completed, so if it isn't running + * then try the next atom */ + if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) + katom = kbase_gpu_inspect(kbdev, js, 1); + + if (katom && katom->gpu_rb_state == + KBASE_ATOM_GPU_RB_SUBMITTED) { + if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { + int device_nr = (katom->core_req & + BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) + ? katom->device_nr : 0; + if (!WARN_ON(device_nr >= 2)) + kbdev->pm.backend.metrics. + active_cl_ctx[device_nr] = 1; + } else { + /* Slot 2 should not be running non-compute + * atoms */ + if (!WARN_ON(js >= 2)) + kbdev->pm.backend.metrics. + active_gl_ctx[js] = 1; + } + kbdev->pm.backend.metrics.gpu_active = true; + } + } +} + +/* called when job is submitted to or removed from a GPU slot */ +void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp) +{ + unsigned long flags; + ktime_t now; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags); + + if (!timestamp) { + now = ktime_get(); + timestamp = &now; + } + + /* Track how long CL and/or GL jobs have been busy for */ + kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp); + + kbase_pm_metrics_active_calc(kbdev); + + spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags); +} diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c new file mode 100644 index 00000000000000..075f020c66e6b2 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c @@ -0,0 +1,973 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Power policy API implementations + */ + +#include +#include +#include +#include +#include + +static const struct kbase_pm_policy *const policy_list[] = { +#ifdef CONFIG_MALI_NO_MALI + &kbase_pm_always_on_policy_ops, + &kbase_pm_demand_policy_ops, + &kbase_pm_coarse_demand_policy_ops, +#if !MALI_CUSTOMER_RELEASE + &kbase_pm_demand_always_powered_policy_ops, + &kbase_pm_fast_start_policy_ops, +#endif +#else /* CONFIG_MALI_NO_MALI */ +#if !PLATFORM_POWER_DOWN_ONLY + &kbase_pm_demand_policy_ops, +#endif /* !PLATFORM_POWER_DOWN_ONLY */ + &kbase_pm_coarse_demand_policy_ops, + &kbase_pm_always_on_policy_ops, +#if !MALI_CUSTOMER_RELEASE +#if !PLATFORM_POWER_DOWN_ONLY + &kbase_pm_demand_always_powered_policy_ops, + &kbase_pm_fast_start_policy_ops, +#endif /* !PLATFORM_POWER_DOWN_ONLY */ +#endif +#endif /* CONFIG_MALI_NO_MALI */ +}; + +/* The number of policies available in the system. + * This is derived from the number of functions listed in policy_get_functions. + */ +#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list)) + + +/* Function IDs for looking up Timeline Trace codes in + * kbase_pm_change_state_trace_code */ +enum kbase_pm_func_id { + KBASE_PM_FUNC_ID_REQUEST_CORES_START, + KBASE_PM_FUNC_ID_REQUEST_CORES_END, + KBASE_PM_FUNC_ID_RELEASE_CORES_START, + KBASE_PM_FUNC_ID_RELEASE_CORES_END, + /* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither + * expect to hit it nor tend to hit it very much anyway. We can detect + * whether we need more instrumentation by a difference between + * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. 
*/ + + /* Must be the last */ + KBASE_PM_FUNC_ID_COUNT +}; + + +/* State changes during request/unrequest/release-ing cores */ +enum { + KBASE_PM_CHANGE_STATE_SHADER = (1u << 0), + KBASE_PM_CHANGE_STATE_TILER = (1u << 1), + + /* These two must be last */ + KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER | + KBASE_PM_CHANGE_STATE_SHADER), + KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1 +}; +typedef u32 kbase_pm_change_state; + + +#ifdef CONFIG_MALI_TRACE_TIMELINE +/* Timeline Trace code lookups for each function */ +static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT] + [KBASE_PM_CHANGE_STATE_COUNT] = { + /* kbase_pm_request_cores */ + [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0, + [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, + [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, + [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER | + KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, + + [KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0, + [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, + [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, + [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER | + KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, + + /* kbase_pm_release_cores */ + [KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0, + [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, + [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, + [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER | + KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, + + [KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0, + [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, + [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, + [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER | + KBASE_PM_CHANGE_STATE_TILER] = + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END +}; + +static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev, + enum kbase_pm_func_id func_id, + kbase_pm_change_state state) +{ + int trace_code; + + KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT); + KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) == + state); + + trace_code = kbase_pm_change_state_trace_code[func_id][state]; + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code); +} + +#else /* CONFIG_MALI_TRACE_TIMELINE */ +static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev, + enum kbase_pm_func_id func_id, kbase_pm_change_state state) +{ +} + +#endif /* CONFIG_MALI_TRACE_TIMELINE */ + +/** + * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any + * requested shader cores + * @kbdev: Device pointer + */ +static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev) +{ + u64 prev_shader_state = kbdev->pm.backend.desired_shader_state; + u64 prev_tiler_state = 
kbdev->pm.backend.desired_tiler_state; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + kbdev->pm.backend.desired_shader_state &= + ~kbdev->pm.backend.shader_poweroff_pending; + kbdev->pm.backend.desired_tiler_state &= + ~kbdev->pm.backend.tiler_poweroff_pending; + + kbdev->pm.backend.shader_poweroff_pending = 0; + kbdev->pm.backend.tiler_poweroff_pending = 0; + + if (prev_shader_state != kbdev->pm.backend.desired_shader_state || + prev_tiler_state != + kbdev->pm.backend.desired_tiler_state || + kbdev->pm.backend.ca_in_transition) { + bool cores_are_available; + + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START); + cores_are_available = kbase_pm_check_transitions_nolock(kbdev); + KBASE_TIMELINE_PM_CHECKTRANS(kbdev, + SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END); + + /* Don't need 'cores_are_available', + * because we don't return anything */ + CSTD_UNUSED(cores_are_available); + } +} + +static enum hrtimer_restart +kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer) +{ + struct kbase_device *kbdev; + unsigned long flags; + + kbdev = container_of(timer, struct kbase_device, + pm.backend.gpu_poweroff_timer); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* It is safe for this call to do nothing if the work item is already + * queued. The worker function will read the must up-to-date state of + * kbdev->pm.backend.gpu_poweroff_pending under lock. + * + * If a state change occurs while the worker function is processing, + * this call will succeed as a work item can be requeued once it has + * started processing. + */ + if (kbdev->pm.backend.gpu_poweroff_pending) + queue_work(kbdev->pm.backend.gpu_poweroff_wq, + &kbdev->pm.backend.gpu_poweroff_work); + + if (kbdev->pm.backend.shader_poweroff_pending || + kbdev->pm.backend.tiler_poweroff_pending) { + kbdev->pm.backend.shader_poweroff_pending_time--; + + KBASE_DEBUG_ASSERT( + kbdev->pm.backend.shader_poweroff_pending_time + >= 0); + + if (!kbdev->pm.backend.shader_poweroff_pending_time) + kbasep_pm_do_poweroff_cores(kbdev); + } + + if (kbdev->pm.backend.poweroff_timer_needed) { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time); + + return HRTIMER_RESTART; + } + + kbdev->pm.backend.poweroff_timer_running = false; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return HRTIMER_NORESTART; +} + +static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data) +{ + unsigned long flags; + struct kbase_device *kbdev; + bool do_poweroff = false; + + kbdev = container_of(data, struct kbase_device, + pm.backend.gpu_poweroff_work); + + mutex_lock(&kbdev->pm.lock); + + if (kbdev->pm.backend.gpu_poweroff_pending == 0) { + mutex_unlock(&kbdev->pm.lock); + return; + } + + kbdev->pm.backend.gpu_poweroff_pending--; + + if (kbdev->pm.backend.gpu_poweroff_pending > 0) { + mutex_unlock(&kbdev->pm.lock); + return; + } + + KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* Only power off the GPU if a request is still pending */ + if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev)) + do_poweroff = true; + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + if (do_poweroff) { + kbdev->pm.backend.poweroff_timer_needed = false; + hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer); + kbdev->pm.backend.poweroff_timer_running = false; + + /* Power off the GPU */ + kbase_pm_do_poweroff(kbdev, false); + } + + 
mutex_unlock(&kbdev->pm.lock); +} + +int kbase_pm_policy_init(struct kbase_device *kbdev) +{ + struct workqueue_struct *wq; + + wq = alloc_workqueue("kbase_pm_do_poweroff", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!wq) + return -ENOMEM; + + kbdev->pm.backend.gpu_poweroff_wq = wq; + INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work, + kbasep_pm_do_gpu_poweroff_wq); + hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kbdev->pm.backend.gpu_poweroff_timer.function = + kbasep_pm_do_gpu_poweroff_callback; + kbdev->pm.backend.pm_current_policy = policy_list[0]; + kbdev->pm.backend.pm_current_policy->init(kbdev); + kbdev->pm.gpu_poweroff_time = + HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS); + kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER; + kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU; + + return 0; +} + +void kbase_pm_policy_term(struct kbase_device *kbdev) +{ + kbdev->pm.backend.pm_current_policy->term(kbdev); + destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq); +} + +void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev) +{ + unsigned long flags; + + lockdep_assert_held(&kbdev->pm.lock); + + kbdev->pm.backend.poweroff_timer_needed = false; + hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbdev->pm.backend.poweroff_timer_running = false; + + /* If wq is already running but is held off by pm.lock, make sure it has + * no effect */ + kbdev->pm.backend.gpu_poweroff_pending = 0; + + kbdev->pm.backend.shader_poweroff_pending = 0; + kbdev->pm.backend.tiler_poweroff_pending = 0; + kbdev->pm.backend.shader_poweroff_pending_time = 0; + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +void kbase_pm_update_active(struct kbase_device *kbdev) +{ + struct kbase_pm_device_data *pm = &kbdev->pm; + struct kbase_pm_backend_data *backend = &pm->backend; + unsigned long flags; + bool active; + + lockdep_assert_held(&pm->lock); + + /* pm_current_policy will never be NULL while pm.lock is held */ + KBASE_DEBUG_ASSERT(backend->pm_current_policy); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + active = backend->pm_current_policy->get_core_active(kbdev); + + if (active) { + if (backend->gpu_poweroff_pending) { + /* Cancel any pending power off request */ + backend->gpu_poweroff_pending = 0; + + /* If a request was pending then the GPU was still + * powered, so no need to continue */ + if (!kbdev->poweroff_pending) { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, + flags); + return; + } + } + + if (!backend->poweroff_timer_running && !backend->gpu_powered && + (pm->poweroff_gpu_ticks || + pm->poweroff_shader_ticks)) { + backend->poweroff_timer_needed = true; + backend->poweroff_timer_running = true; + hrtimer_start(&backend->gpu_poweroff_timer, + pm->gpu_poweroff_time, + HRTIMER_MODE_REL); + } + + /* Power on the GPU and any cores requested by the policy */ + if (pm->backend.poweroff_wait_in_progress) { + pm->backend.poweron_required = true; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + } else { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + kbase_pm_do_poweron(kbdev, false); + } + } else { + /* It is an error for the power policy to power off the GPU + * when there are contexts active */ + KBASE_DEBUG_ASSERT(pm->active_count == 0); + + if (backend->shader_poweroff_pending || + backend->tiler_poweroff_pending) { + backend->shader_poweroff_pending = 0; + backend->tiler_poweroff_pending = 0; + 
backend->shader_poweroff_pending_time = 0; + } + + /* Request power off */ + if (pm->backend.gpu_powered) { + if (pm->poweroff_gpu_ticks) { + backend->gpu_poweroff_pending = + pm->poweroff_gpu_ticks; + backend->poweroff_timer_needed = true; + if (!backend->poweroff_timer_running) { + /* Start timer if not running (eg if + * power policy has been changed from + * always_on to something else). This + * will ensure the GPU is actually + * powered off */ + backend->poweroff_timer_running + = true; + hrtimer_start( + &backend->gpu_poweroff_timer, + pm->gpu_poweroff_time, + HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, + flags); + } else { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, + flags); + + /* Power off the GPU immediately */ + kbase_pm_do_poweroff(kbdev, false); + } + } else { + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + } + } +} + +void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev) +{ + u64 desired_bitmap; + u64 desired_tiler_bitmap; + bool cores_are_available; + bool do_poweroff = false; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (kbdev->pm.backend.pm_current_policy == NULL) + return; + if (kbdev->pm.backend.poweroff_wait_in_progress) + return; + + if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap && + !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt + && !kbdev->tiler_inuse_cnt) { + /* We are trying to change in/out of protected mode - force all + * cores off so that the L2 powers down */ + desired_bitmap = 0; + desired_tiler_bitmap = 0; + } else { + desired_bitmap = + kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev); + desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev); + + if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0) + desired_tiler_bitmap = 1; + else + desired_tiler_bitmap = 0; + + if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) { + /* Unless XAFFINITY is supported, enable core 0 if tiler + * required, regardless of core availability */ + if (kbdev->tiler_needed_cnt > 0 || + kbdev->tiler_inuse_cnt > 0) + desired_bitmap |= 1; + } + } + + if (kbdev->pm.backend.desired_shader_state != desired_bitmap) + KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u, + (u32)desired_bitmap); + /* Are any cores being powered on? 
*/ + if (~kbdev->pm.backend.desired_shader_state & desired_bitmap || + ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap || + kbdev->pm.backend.ca_in_transition) { + /* Check if we are powering off any cores before updating shader + * state */ + if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap || + kbdev->pm.backend.desired_tiler_state & + ~desired_tiler_bitmap) { + /* Start timer to power off cores */ + kbdev->pm.backend.shader_poweroff_pending |= + (kbdev->pm.backend.desired_shader_state & + ~desired_bitmap); + kbdev->pm.backend.tiler_poweroff_pending |= + (kbdev->pm.backend.desired_tiler_state & + ~desired_tiler_bitmap); + + if (kbdev->pm.poweroff_shader_ticks && + !kbdev->protected_mode_transition) + kbdev->pm.backend.shader_poweroff_pending_time = + kbdev->pm.poweroff_shader_ticks; + else + do_poweroff = true; + } + + kbdev->pm.backend.desired_shader_state = desired_bitmap; + kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap; + + /* If any cores are being powered on, transition immediately */ + cores_are_available = kbase_pm_check_transitions_nolock(kbdev); + } else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap || + kbdev->pm.backend.desired_tiler_state & + ~desired_tiler_bitmap) { + /* Start timer to power off cores */ + kbdev->pm.backend.shader_poweroff_pending |= + (kbdev->pm.backend.desired_shader_state & + ~desired_bitmap); + kbdev->pm.backend.tiler_poweroff_pending |= + (kbdev->pm.backend.desired_tiler_state & + ~desired_tiler_bitmap); + if (kbdev->pm.poweroff_shader_ticks && + !kbdev->protected_mode_transition) + kbdev->pm.backend.shader_poweroff_pending_time = + kbdev->pm.poweroff_shader_ticks; + else + kbasep_pm_do_poweroff_cores(kbdev); + } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && + desired_tiler_bitmap != 0 && + kbdev->pm.backend.poweroff_timer_needed) { + /* If power policy is keeping cores on despite there being no + * active contexts then disable poweroff timer as it isn't + * required. 
+ * Only reset poweroff_timer_needed if we're not in the middle + * of the power off callback */ + kbdev->pm.backend.poweroff_timer_needed = false; + } + + /* Ensure timer does not power off wanted cores and make sure to power + * off unwanted cores */ + if (kbdev->pm.backend.shader_poweroff_pending || + kbdev->pm.backend.tiler_poweroff_pending) { + kbdev->pm.backend.shader_poweroff_pending &= + ~(kbdev->pm.backend.desired_shader_state & + desired_bitmap); + kbdev->pm.backend.tiler_poweroff_pending &= + ~(kbdev->pm.backend.desired_tiler_state & + desired_tiler_bitmap); + + if (!kbdev->pm.backend.shader_poweroff_pending && + !kbdev->pm.backend.tiler_poweroff_pending) + kbdev->pm.backend.shader_poweroff_pending_time = 0; + } + + /* Shader poweroff is deferred to the end of the function, to eliminate + * issues caused by the core availability policy recursing into this + * function */ + if (do_poweroff) + kbasep_pm_do_poweroff_cores(kbdev); + + /* Don't need 'cores_are_available', because we don't return anything */ + CSTD_UNUSED(cores_are_available); +} + +void kbase_pm_update_cores_state(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kbase_pm_update_cores_state_nolock(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +int kbase_pm_list_policies(const struct kbase_pm_policy * const **list) +{ + if (!list) + return POLICY_COUNT; + + *list = policy_list; + + return POLICY_COUNT; +} + +KBASE_EXPORT_TEST_API(kbase_pm_list_policies); + +const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + return kbdev->pm.backend.pm_current_policy; +} + +KBASE_EXPORT_TEST_API(kbase_pm_get_policy); + +void kbase_pm_set_policy(struct kbase_device *kbdev, + const struct kbase_pm_policy *new_policy) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + const struct kbase_pm_policy *old_policy; + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(new_policy != NULL); + + KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id); + + /* During a policy change we pretend the GPU is active */ + /* A suspend won't happen here, because we're in a syscall from a + * userspace thread */ + kbase_pm_context_active(kbdev); + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + /* Remove the policy to prevent IRQ handlers from working on it */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + old_policy = kbdev->pm.backend.pm_current_policy; + kbdev->pm.backend.pm_current_policy = NULL; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, + old_policy->id); + if (old_policy->term) + old_policy->term(kbdev); + + KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, + new_policy->id); + if (new_policy->init) + new_policy->init(kbdev); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbdev->pm.backend.pm_current_policy = new_policy; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + /* If any core power state changes were previously attempted, but + * couldn't be made because the policy was changing (current_policy was + * NULL), then re-try them here. 
*/ + kbase_pm_update_active(kbdev); + kbase_pm_update_cores_state(kbdev); + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + + /* Now the policy change is finished, we release our fake context active + * reference */ + kbase_pm_context_idle(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_set_policy); + +/* Check whether a state change has finished, and trace it as completed */ +static void +kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev) +{ + if ((kbdev->shader_available_bitmap & + kbdev->pm.backend.desired_shader_state) + == kbdev->pm.backend.desired_shader_state && + (kbdev->tiler_available_bitmap & + kbdev->pm.backend.desired_tiler_state) + == kbdev->pm.backend.desired_tiler_state) + kbase_timeline_pm_check_handle_event(kbdev, + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED); +} + +void kbase_pm_request_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores) +{ + u64 cores; + + kbase_pm_change_state change_gpu_state = 0u; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + cores = shader_cores; + while (cores) { + int bitnum = fls64(cores) - 1; + u64 bit = 1ULL << bitnum; + + /* It should be almost impossible for this to overflow. It would + * require 2^32 atoms to request a particular core, which would + * require 2^24 contexts to submit. This would require an amount + * of memory that is impossible on a 32-bit system and extremely + * unlikely on a 64-bit system. */ + int cnt = ++kbdev->shader_needed_cnt[bitnum]; + + if (1 == cnt) { + kbdev->shader_needed_bitmap |= bit; + change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; + } + + cores &= ~bit; + } + + if (tiler_required) { + int cnt = ++kbdev->tiler_needed_cnt; + + if (1 == cnt) + change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; + + KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0); + } + + if (change_gpu_state) { + KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL, + NULL, 0u, (u32) kbdev->shader_needed_bitmap); + + kbase_timeline_pm_cores_func(kbdev, + KBASE_PM_FUNC_ID_REQUEST_CORES_START, + change_gpu_state); + kbase_pm_update_cores_state_nolock(kbdev); + kbase_timeline_pm_cores_func(kbdev, + KBASE_PM_FUNC_ID_REQUEST_CORES_END, + change_gpu_state); + } +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_cores); + +void kbase_pm_unrequest_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores) +{ + kbase_pm_change_state change_gpu_state = 0u; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + while (shader_cores) { + int bitnum = fls64(shader_cores) - 1; + u64 bit = 1ULL << bitnum; + int cnt; + + KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0); + + cnt = --kbdev->shader_needed_cnt[bitnum]; + + if (0 == cnt) { + kbdev->shader_needed_bitmap &= ~bit; + + change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; + } + + shader_cores &= ~bit; + } + + if (tiler_required) { + int cnt; + + KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0); + + cnt = --kbdev->tiler_needed_cnt; + + if (0 == cnt) + change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; + } + + if (change_gpu_state) { + KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL, + NULL, 0u, (u32) kbdev->shader_needed_bitmap); + + kbase_pm_update_cores_state_nolock(kbdev); + + /* Trace that any state change effectively completes immediately + * - no-one will wait on the state change */ + kbase_pm_trace_check_and_finish_state_change(kbdev); + } +} + +KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores); + +enum 
kbase_pm_cores_ready +kbase_pm_register_inuse_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores) +{ + u64 prev_shader_needed; /* Just for tracing */ + u64 prev_shader_inuse; /* Just for tracing */ + + lockdep_assert_held(&kbdev->hwaccess_lock); + + prev_shader_needed = kbdev->shader_needed_bitmap; + prev_shader_inuse = kbdev->shader_inuse_bitmap; + + /* If desired_shader_state does not contain the requested cores, then + * power management is not attempting to powering those cores (most + * likely due to core availability policy) and a new job affinity must + * be chosen */ + if ((kbdev->pm.backend.desired_shader_state & shader_cores) != + shader_cores) { + return (kbdev->pm.backend.poweroff_wait_in_progress || + kbdev->pm.backend.pm_current_policy == NULL) ? + KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY; + } + + if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores || + (tiler_required && !kbdev->tiler_available_bitmap)) { + /* Trace ongoing core transition */ + kbase_timeline_pm_l2_transition_start(kbdev); + return KBASE_CORES_NOT_READY; + } + + /* If we started to trace a state change, then trace it has being + * finished by now, at the very latest */ + kbase_pm_trace_check_and_finish_state_change(kbdev); + /* Trace core transition done */ + kbase_timeline_pm_l2_transition_done(kbdev); + + while (shader_cores) { + int bitnum = fls64(shader_cores) - 1; + u64 bit = 1ULL << bitnum; + int cnt; + + KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0); + + cnt = --kbdev->shader_needed_cnt[bitnum]; + + if (0 == cnt) + kbdev->shader_needed_bitmap &= ~bit; + + /* shader_inuse_cnt should not overflow because there can only + * be a very limited number of jobs on the h/w at one time */ + + kbdev->shader_inuse_cnt[bitnum]++; + kbdev->shader_inuse_bitmap |= bit; + + shader_cores &= ~bit; + } + + if (tiler_required) { + KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0); + + --kbdev->tiler_needed_cnt; + + kbdev->tiler_inuse_cnt++; + + KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0); + } + + if (prev_shader_needed != kbdev->shader_needed_bitmap) + KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL, + NULL, 0u, (u32) kbdev->shader_needed_bitmap); + + if (prev_shader_inuse != kbdev->shader_inuse_bitmap) + KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL, + NULL, 0u, (u32) kbdev->shader_inuse_bitmap); + + return KBASE_CORES_READY; +} + +KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores); + +void kbase_pm_release_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores) +{ + kbase_pm_change_state change_gpu_state = 0u; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + lockdep_assert_held(&kbdev->hwaccess_lock); + + while (shader_cores) { + int bitnum = fls64(shader_cores) - 1; + u64 bit = 1ULL << bitnum; + int cnt; + + KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0); + + cnt = --kbdev->shader_inuse_cnt[bitnum]; + + if (0 == cnt) { + kbdev->shader_inuse_bitmap &= ~bit; + change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER; + } + + shader_cores &= ~bit; + } + + if (tiler_required) { + int cnt; + + KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0); + + cnt = --kbdev->tiler_inuse_cnt; + + if (0 == cnt) + change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER; + } + + if (change_gpu_state) { + KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL, + NULL, 0u, (u32) kbdev->shader_inuse_bitmap); + + kbase_timeline_pm_cores_func(kbdev, + KBASE_PM_FUNC_ID_RELEASE_CORES_START, + change_gpu_state); + kbase_pm_update_cores_state_nolock(kbdev); 
+ kbase_timeline_pm_cores_func(kbdev, + KBASE_PM_FUNC_ID_RELEASE_CORES_END, + change_gpu_state); + + /* Trace that any state change completed immediately */ + kbase_pm_trace_check_and_finish_state_change(kbdev); + } +} + +KBASE_EXPORT_TEST_API(kbase_pm_release_cores); + +void kbase_pm_request_cores_sync(struct kbase_device *kbdev, + bool tiler_required, + u64 shader_cores) +{ + unsigned long flags; + + kbase_pm_wait_for_poweroff_complete(kbdev); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_pm_request_cores(kbdev, tiler_required, shader_cores); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + kbase_pm_check_transitions_sync(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync); + +void kbase_pm_request_l2_caches(struct kbase_device *kbdev) +{ + unsigned long flags; + u32 prior_l2_users_count; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + prior_l2_users_count = kbdev->l2_users_count++; + + KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0); + + /* if the GPU is reset while the l2 is on, l2 will be off but + * prior_l2_users_count will be > 0. l2_available_bitmap will have been + * set to 0 though by kbase_pm_init_hw */ + if (!prior_l2_users_count || !kbdev->l2_available_bitmap) + kbase_pm_check_transitions_nolock(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + wait_event(kbdev->pm.backend.l2_powered_wait, + kbdev->pm.backend.l2_powered == 1); + + /* Trace that any state change completed immediately */ + kbase_pm_trace_check_and_finish_state_change(kbdev); +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches); + +void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + kbdev->l2_users_count++; +} + +KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on); + +void kbase_pm_release_l2_caches(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0); + + --kbdev->l2_users_count; + + if (!kbdev->l2_users_count) { + kbase_pm_check_transitions_nolock(kbdev); + /* Trace that any state change completed immediately */ + kbase_pm_trace_check_and_finish_state_change(kbdev); + } +} + +KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches); diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h new file mode 100644 index 00000000000000..611a90e66e6593 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h @@ -0,0 +1,227 @@ +/* + * + * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Power policy API definitions + */ + +#ifndef _KBASE_PM_POLICY_H_ +#define _KBASE_PM_POLICY_H_ + +/** + * kbase_pm_policy_init - Initialize power policy framework + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Must be called before calling any other policy function + * + * Return: 0 if the power policy framework was successfully + * initialized, -errno otherwise. 
+ */ +int kbase_pm_policy_init(struct kbase_device *kbdev); + +/** + * kbase_pm_policy_term - Terminate power policy framework + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_policy_term(struct kbase_device *kbdev); + +/** + * kbase_pm_update_active - Update the active power state of the GPU + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Calls into the current power policy + */ +void kbase_pm_update_active(struct kbase_device *kbdev); + +/** + * kbase_pm_update_cores - Update the desired core state of the GPU + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Calls into the current power policy + */ +void kbase_pm_update_cores(struct kbase_device *kbdev); + + +enum kbase_pm_cores_ready { + KBASE_CORES_NOT_READY = 0, + KBASE_NEW_AFFINITY = 1, + KBASE_CORES_READY = 2 +}; + + +/** + * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores() + * + * @kbdev: The kbase device structure for the device + * @tiler_required: true if the tiler is required, false otherwise + * @shader_cores: A bitmask of shader cores which are necessary for the job + * + * When this function returns, the @shader_cores will be in the READY state. + * + * This is safe variant of kbase_pm_check_transitions_sync(): it handles the + * work of ensuring the requested cores will remain powered until a matching + * call to kbase_pm_unrequest_cores()/kbase_pm_release_cores() (as appropriate) + * is made. + */ +void kbase_pm_request_cores_sync(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores); + +/** + * kbase_pm_request_cores - Mark one or more cores as being required + * for jobs to be submitted + * + * @kbdev: The kbase device structure for the device + * @tiler_required: true if the tiler is required, false otherwise + * @shader_cores: A bitmask of shader cores which are necessary for the job + * + * This function is called by the job scheduler to mark one or more cores as + * being required to submit jobs that are ready to run. + * + * The cores requested are reference counted and a subsequent call to + * kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be + * made to dereference the cores as being 'needed'. + * + * The active power policy will meet or exceed the requirements of the + * requested cores in the system. Any core transitions needed will be begun + * immediately, but they might not complete/the cores might not be available + * until a Power Management IRQ. + * + * Return: 0 if the cores were successfully requested, or -errno otherwise. + */ +void kbase_pm_request_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores); + +/** + * kbase_pm_unrequest_cores - Unmark one or more cores as being required for + * jobs to be submitted. + * + * @kbdev: The kbase device structure for the device + * @tiler_required: true if the tiler is required, false otherwise + * @shader_cores: A bitmask of shader cores (as given to + * kbase_pm_request_cores() ) + * + * This function undoes the effect of kbase_pm_request_cores(). It should be + * used when a job is not going to be submitted to the hardware (e.g. the job is + * cancelled before it is enqueued). + * + * The active power policy will meet or exceed the requirements of the + * requested cores in the system. Any core transitions needed will be begun + * immediately, but they might not complete until a Power Management IRQ. 
+ * + * The policy may use this as an indication that it can power down cores. + */ +void kbase_pm_unrequest_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores); + +/** + * kbase_pm_register_inuse_cores - Register a set of cores as in use by a job + * + * @kbdev: The kbase device structure for the device + * @tiler_required: true if the tiler is required, false otherwise + * @shader_cores: A bitmask of shader cores (as given to + * kbase_pm_request_cores() ) + * + * This function should be called after kbase_pm_request_cores() when the job + * is about to be submitted to the hardware. It will check that the necessary + * cores are available and if so update the 'needed' and 'inuse' bitmasks to + * reflect that the job is now committed to being run. + * + * If the necessary cores are not currently available then the function will + * return %KBASE_CORES_NOT_READY and have no effect. + * + * Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready, + * + * %KBASE_NEW_AFFINITY if the affinity requested is not allowed, + * + * %KBASE_CORES_READY if the cores requested are already available + */ +enum kbase_pm_cores_ready kbase_pm_register_inuse_cores( + struct kbase_device *kbdev, + bool tiler_required, + u64 shader_cores); + +/** + * kbase_pm_release_cores - Release cores after a job has run + * + * @kbdev: The kbase device structure for the device + * @tiler_required: true if the tiler is required, false otherwise + * @shader_cores: A bitmask of shader cores (as given to + * kbase_pm_register_inuse_cores() ) + * + * This function should be called when a job has finished running on the + * hardware. A call to kbase_pm_register_inuse_cores() must have previously + * occurred. The reference counts of the specified cores will be decremented + * which may cause the bitmask of 'inuse' cores to be reduced. The power policy + * may then turn off any cores which are no longer 'inuse'. + */ +void kbase_pm_release_cores(struct kbase_device *kbdev, + bool tiler_required, u64 shader_cores); + +/** + * kbase_pm_request_l2_caches - Request l2 caches + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Request the use of l2 caches for all core groups, power up, wait and prevent + * the power manager from powering down the l2 caches. + * + * This tells the power management that the caches should be powered up, and + * they should remain powered, irrespective of the usage of shader cores. This + * does not return until the l2 caches are powered up. + * + * The caller must call kbase_pm_release_l2_caches() when they are finished + * to allow normal power management of the l2 caches to resume. + * + * This should only be used when power management is active. 
+ */ +void kbase_pm_request_l2_caches(struct kbase_device *kbdev); + +/** + * kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Increment the count of l2 users but do not attempt to power on the l2 + * + * It is the callers responsibility to ensure that the l2 is already powered up + * and to eventually call kbase_pm_release_l2_caches() + */ +void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev); + +/** + * kbase_pm_request_l2_caches - Release l2 caches + * + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * + * Release the use of l2 caches for all core groups and allow the power manager + * to power them down when necessary. + * + * This tells the power management that the caches can be powered down if + * necessary, with respect to the usage of shader cores. + * + * The caller must have called kbase_pm_request_l2_caches() prior to a call + * to this. + * + * This should only be used when power management is active. + */ +void kbase_pm_release_l2_caches(struct kbase_device *kbdev); + +#endif /* _KBASE_PM_POLICY_H_ */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c new file mode 100644 index 00000000000000..9cfe6b7332a677 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c @@ -0,0 +1,111 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include + +void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + u64 *system_time, struct timespec64 *ts) +#else + u64 *system_time, struct timespec *ts) +#endif +{ + u32 hi1, hi2; + + kbase_pm_request_gpu_cycle_counter(kbdev); + + /* Read hi, lo, hi to ensure that overflow from lo to hi is handled + * correctly */ + do { + hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), + NULL); + *cycle_counter = kbase_reg_read(kbdev, + GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL); + hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), + NULL); + *cycle_counter |= (((u64) hi1) << 32); + } while (hi1 != hi2); + + /* Read hi, lo, hi to ensure that overflow from lo to hi is handled + * correctly */ + do { + hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), + NULL); + *system_time = kbase_reg_read(kbdev, + GPU_CONTROL_REG(TIMESTAMP_LO), NULL); + hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), + NULL); + *system_time |= (((u64) hi1) << 32); + } while (hi1 != hi2); + + /* Record the CPU's idea of current time */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + ktime_get_raw_ts64(ts); +#else + getrawmonotonic(ts); +#endif + + kbase_pm_release_gpu_cycle_counter(kbdev); +} + +/** + * kbase_wait_write_flush - Wait for GPU write flush + * @kctx: Context pointer + * + * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush + * its write buffer. 
+ * + * Only in use for BASE_HW_ISSUE_6367 + * + * Note : If GPU resets occur then the counters are reset to zero, the delay may + * not be as expected. + */ +#ifndef CONFIG_MALI_NO_MALI +void kbase_wait_write_flush(struct kbase_context *kctx) +{ + u32 base_count = 0; + + /* + * The caller must be holding onto the kctx or the call is from + * userspace. + */ + kbase_pm_context_active(kctx->kbdev); + kbase_pm_request_gpu_cycle_counter(kctx->kbdev); + + while (true) { + u32 new_count; + + new_count = kbase_reg_read(kctx->kbdev, + GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL); + /* First time around, just store the count. */ + if (base_count == 0) { + base_count = new_count; + continue; + } + + /* No need to handle wrapping, unsigned maths works for this. */ + if ((new_count - base_count) > 1000) + break; + } + + kbase_pm_release_gpu_cycle_counter(kctx->kbdev); + kbase_pm_context_idle(kctx->kbdev); +} +#endif /* CONFIG_MALI_NO_MALI */ diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h new file mode 100644 index 00000000000000..5a32345df709c3 --- /dev/null +++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h @@ -0,0 +1,56 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_BACKEND_TIME_H_ +#define _KBASE_BACKEND_TIME_H_ + +/** + * kbase_backend_get_gpu_time() - Get current GPU time + * @kbdev: Device pointer + * @cycle_counter: Pointer to u64 to store cycle counter in + * @system_time: Pointer to u64 to store system time in + * @ts: Pointer to struct timespec to store current monotonic + * time in + */ +void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + u64 *system_time, struct timespec64 *ts); +#else + u64 *system_time, struct timespec *ts); +#endif + +/** + * kbase_wait_write_flush() - Wait for GPU write flush + * @kctx: Context pointer + * + * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush + * its write buffer. + * + * If GPU resets occur then the counters are reset to zero, the delay may not be + * as expected. + * + * This function is only in use for BASE_HW_ISSUE_6367 + */ +#ifdef CONFIG_MALI_NO_MALI +static inline void kbase_wait_write_flush(struct kbase_context *kctx) +{ +} +#else +void kbase_wait_write_flush(struct kbase_context *kctx); +#endif + +#endif /* _KBASE_BACKEND_TIME_H_ */ diff --git a/drivers/gpu/arm/midgard/docs/Doxyfile b/drivers/gpu/arm/midgard/docs/Doxyfile new file mode 100644 index 00000000000000..35ff2f1ce4a068 --- /dev/null +++ b/drivers/gpu/arm/midgard/docs/Doxyfile @@ -0,0 +1,126 @@ +# +# (C) COPYRIGHT 2011-2013, 2015 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. 
+# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +############################################################################## + +# This file contains per-module Doxygen configuration. Please do not add +# extra settings to this file without consulting all stakeholders, as they +# may cause override project-wide settings. +# +# Additionally, when defining aliases, macros, sections etc, use the module +# name as a prefix e.g. gles_my_alias. + +############################################################################## + +@INCLUDE = ../../bldsys/Doxyfile_common + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT += ../../kernel/drivers/gpu/arm/midgard/ + +############################################################################## +# Everything below here is optional, and in most cases not required +############################################################################## + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES += + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS += + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS += + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +EXCLUDE += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/pm_test_script.sh ../../kernel/drivers/gpu/arm/midgard/mali_uk.h ../../kernel/drivers/gpu/arm/midgard/Makefile + + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS += + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS += + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH += + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH += + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH += + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED += + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED += + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS += ../../kernel/drivers/gpu/arm/midgard/docs + diff --git a/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot new file mode 100644 index 00000000000000..7ae05c2f8ded45 --- /dev/null +++ b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot @@ -0,0 +1,112 @@ +/* + * + * (C) COPYRIGHT 2010 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +digraph policy_objects_diagram { + rankdir=LR; + size="12,8"; + compound=true; + + node [ shape = box ]; + + subgraph cluster_policy_queues { + low_queue [ shape=record label = "LowP | {ctx_lo | ... | ctx_i | ... | ctx_hi}" ]; + queues_middle_sep [ label="" shape=plaintext width=0 height=0 ]; + + rt_queue [ shape=record label = "RT | {ctx_lo | ... | ctx_j | ... 
| ctx_hi}" ]; + + label = "Policy's Queue(s)"; + } + + call_enqueue [ shape=plaintext label="enqueue_ctx()" ]; + + { + rank=same; + ordering=out; + call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ]; + call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ]; + + call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ]; + } + + subgraph cluster_runpool { + + as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ]; + as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ]; + as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ]; + as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ]; + + label = "Policy's Run Pool"; + } + + { + rank=same; + call_jdequeue [ shape=plaintext label="dequeue_job()" ]; + sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ]; + } + + { + rank=same; + ordering=out; + sstop [ shape=ellipse label="SS-Timer expires" ] + jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ]; + + irq [ label="IRQ" shape=ellipse ]; + + job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ]; + } + + hstop [ shape=ellipse label="HS-Timer expires" ] + + /* + * Edges + */ + + call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ]; + + low_queue:qr -> call_dequeue:w; + rt_queue:qr -> call_dequeue:w; + + call_dequeue -> as1 [lhead=cluster_runpool]; + + as1->call_jdequeue [ltail=cluster_runpool]; + call_jdequeue->jobslots:0; + call_jdequeue->sstop_dotfixup [ arrowhead=none]; + sstop_dotfixup->sstop [label="Spawn SS-Timer"]; + sstop->jobslots [label="SoftStop"]; + sstop->hstop [label="Spawn HS-Timer"]; + hstop->jobslots:ne [label="HardStop"]; + + + as3->call_ctxfinish:ne [ ltail=cluster_runpool ]; + call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ]; + + call_ctxfinish->call_ctxdone [constraint=false]; + + call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false]; + + + { + jobslots->irq [constraint=false]; + + irq->job_finish [constraint=false]; + } + + irq->as2 [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ]; + +} diff --git a/drivers/gpu/arm/midgard/docs/policy_overview.dot b/drivers/gpu/arm/midgard/docs/policy_overview.dot new file mode 100644 index 00000000000000..159b993b7d61f6 --- /dev/null +++ b/drivers/gpu/arm/midgard/docs/policy_overview.dot @@ -0,0 +1,63 @@ +/* + * + * (C) COPYRIGHT 2010 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +digraph policy_objects_diagram { + rankdir=LR + size="6,6" + compound=true; + + node [ shape = box ]; + + call_enqueue [ shape=plaintext label="enqueue ctx" ]; + + + policy_queue [ label="Policy's Queue" ]; + + { + rank=same; + runpool [ label="Policy's Run Pool" ]; + + ctx_finish [ label="ctx finished" ]; + } + + { + rank=same; + jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ]; + + job_finish [ label="Job finished" ]; + } + + + + /* + * Edges + */ + + call_enqueue -> policy_queue; + + policy_queue->runpool [label="dequeue ctx" weight=0.1]; + runpool->policy_queue [label="requeue ctx" weight=0.1]; + + runpool->ctx_finish [ style=dotted ]; + + runpool->jobslots [label="dequeue job" weight=0.1]; + jobslots->runpool [label="requeue job" weight=0.1]; + + jobslots->job_finish [ style=dotted ]; +} diff --git a/drivers/gpu/arm/midgard/ipa/Kbuild b/drivers/gpu/arm/midgard/ipa/Kbuild new file mode 100644 index 00000000000000..8e37f406433e17 --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/Kbuild @@ -0,0 +1,27 @@ +# +# (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +mali_kbase-y += \ + ipa/mali_kbase_ipa_simple.o \ + ipa/mali_kbase_ipa.o + +mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o + +ifneq ($(wildcard $(src)/ipa/mali_kbase_ipa_vinstr_g71.c),) + mali_kbase-y += \ + ipa/mali_kbase_ipa_vinstr_g71.o \ + ipa/mali_kbase_ipa_vinstr_common.o + +endif diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c new file mode 100644 index 00000000000000..1450c2cf58ed59 --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c @@ -0,0 +1,590 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + +#include +#include +#include +#include "mali_kbase.h" +#include "mali_kbase_ipa.h" +#include "mali_kbase_ipa_debugfs.h" +#include "mali_kbase_ipa_simple.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +#include +#else +#include +#define dev_pm_opp_find_freq_exact opp_find_freq_exact +#define dev_pm_opp_get_voltage opp_get_voltage +#define dev_pm_opp opp +#endif + +#define KBASE_IPA_FALLBACK_MODEL_NAME "mali-simple-power-model" +#define KBASE_IPA_G71_MODEL_NAME "mali-g71-power-model" + +static struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = { + &kbase_simple_ipa_model_ops, + &kbase_g71_ipa_model_ops +}; + +int kbase_ipa_model_recalculate(struct kbase_ipa_model *model) +{ + int err = 0; + + lockdep_assert_held(&model->kbdev->ipa.lock); + + if (model->ops->recalculate) { + err = model->ops->recalculate(model); + if (err) { + dev_err(model->kbdev->dev, + "recalculation of power model %s returned error %d\n", + model->ops->name, err); + } + } + + return err; +} + +static struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev, + const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kbase_ipa_all_model_ops); ++i) { + struct kbase_ipa_model_ops *ops = kbase_ipa_all_model_ops[i]; + + if (!strcmp(ops->name, name)) + return ops; + } + + dev_err(kbdev->dev, "power model \'%s\' not found\n", name); + + return NULL; +} + +void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev) +{ + atomic_set(&kbdev->ipa_use_configured_model, false); +} + +void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev) +{ + atomic_set(&kbdev->ipa_use_configured_model, true); +} + +const char *kbase_ipa_model_name_from_id(u32 gpu_id) +{ + const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> + GPU_ID_VERSION_PRODUCT_ID_SHIFT; + + if (GPU_ID_IS_NEW_FORMAT(prod_id)) { + switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) { + case GPU_ID2_PRODUCT_TMIX: + return KBASE_IPA_G71_MODEL_NAME; + default: + return KBASE_IPA_FALLBACK_MODEL_NAME; + } + } + + return KBASE_IPA_FALLBACK_MODEL_NAME; +} + +static struct device_node *get_model_dt_node(struct kbase_ipa_model *model) +{ + struct device_node *model_dt_node; + char compat_string[64]; + + snprintf(compat_string, sizeof(compat_string), "arm,%s", + model->ops->name); + + model_dt_node = of_find_compatible_node(model->kbdev->dev->of_node, + NULL, compat_string); + if (!model_dt_node && !model->missing_dt_node_warning) { + dev_warn(model->kbdev->dev, + "Couldn't find power_model DT node matching \'%s\'\n", + compat_string); + model->missing_dt_node_warning = true; + } + + return model_dt_node; +} + +int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model, + const char *name, s32 *addr, + size_t num_elems, bool dt_required) +{ + int err, i; + struct device_node *model_dt_node = get_model_dt_node(model); + char *origin; + + err = of_property_read_u32_array(model_dt_node, name, addr, num_elems); + + if (err && dt_required) { + memset(addr, 0, sizeof(s32) * num_elems); + dev_warn(model->kbdev->dev, + "Error %d, no DT entry: %s.%s = %zu*[0]\n", + err, model->ops->name, name, num_elems); + origin = "zero"; + } else if (err && !dt_required) { + origin = "default"; + } else /* !err */ { + origin = "DT"; + } + + /* Create a unique debugfs entry for each element */ + for (i = 0; i < num_elems; ++i) { + char elem_name[32]; + + if (num_elems == 1) + snprintf(elem_name, sizeof(elem_name), "%s", name); + else + snprintf(elem_name, sizeof(elem_name), "%s.%d", + name, i); + + dev_dbg(model->kbdev->dev, 
"%s.%s = %d (%s)\n", + model->ops->name, elem_name, addr[i], origin); + + err = kbase_ipa_model_param_add(model, elem_name, + &addr[i], sizeof(s32), + PARAM_TYPE_S32); + if (err) + goto exit; + } +exit: + return err; +} + +int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model, + const char *name, char *addr, + size_t size, bool dt_required) +{ + int err; + struct device_node *model_dt_node = get_model_dt_node(model); + const char *string_prop_value; + char *origin; + + err = of_property_read_string(model_dt_node, name, + &string_prop_value); + if (err && dt_required) { + strncpy(addr, "", size - 1); + dev_warn(model->kbdev->dev, + "Error %d, no DT entry: %s.%s = \'%s\'\n", + err, model->ops->name, name, addr); + err = 0; + origin = "zero"; + } else if (err && !dt_required) { + origin = "default"; + } else /* !err */ { + strncpy(addr, string_prop_value, size - 1); + origin = "DT"; + } + + addr[size - 1] = '\0'; + + dev_dbg(model->kbdev->dev, "%s.%s = \'%s\' (%s)\n", + model->ops->name, name, string_prop_value, origin); + + err = kbase_ipa_model_param_add(model, name, addr, size, + PARAM_TYPE_STRING); + + return err; +} + +void kbase_ipa_term_model(struct kbase_ipa_model *model) +{ + if (!model) + return; + + lockdep_assert_held(&model->kbdev->ipa.lock); + + if (model->ops->term) + model->ops->term(model); + + kbase_ipa_model_param_free_all(model); + + kfree(model); +} +KBASE_EXPORT_TEST_API(kbase_ipa_term_model); + +struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev, + struct kbase_ipa_model_ops *ops) +{ + struct kbase_ipa_model *model; + int err; + + lockdep_assert_held(&kbdev->ipa.lock); + + if (!ops || !ops->name) + return NULL; + + model = kzalloc(sizeof(struct kbase_ipa_model), GFP_KERNEL); + if (!model) + return NULL; + + model->kbdev = kbdev; + model->ops = ops; + INIT_LIST_HEAD(&model->params); + + err = model->ops->init(model); + if (err) { + dev_err(kbdev->dev, + "init of power model \'%s\' returned error %d\n", + ops->name, err); + kfree(model); + return NULL; + } + + err = kbase_ipa_model_recalculate(model); + if (err) { + kbase_ipa_term_model(model); + return NULL; + } + + return model; +} +KBASE_EXPORT_TEST_API(kbase_ipa_init_model); + +static void kbase_ipa_term_locked(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->ipa.lock); + + /* Clean up the models */ + if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model) + kbase_ipa_term_model(kbdev->ipa.configured_model); + kbase_ipa_term_model(kbdev->ipa.fallback_model); + + kbdev->ipa.configured_model = NULL; + kbdev->ipa.fallback_model = NULL; +} + +int kbase_ipa_init(struct kbase_device *kbdev) +{ + + const char *model_name; + struct kbase_ipa_model_ops *ops; + struct kbase_ipa_model *default_model = NULL; + int err; + + mutex_init(&kbdev->ipa.lock); + /* + * Lock during init to avoid warnings from lockdep_assert_held (there + * shouldn't be any concurrent access yet). 
+ */ + mutex_lock(&kbdev->ipa.lock); + + /* The simple IPA model must *always* be present.*/ + ops = kbase_ipa_model_ops_find(kbdev, KBASE_IPA_FALLBACK_MODEL_NAME); + + if (!ops->do_utilization_scaling_in_framework) { + dev_err(kbdev->dev, + "Fallback IPA model %s should not account for utilization\n", + ops->name); + err = -EINVAL; + goto end; + } + + default_model = kbase_ipa_init_model(kbdev, ops); + if (!default_model) { + err = -EINVAL; + goto end; + } + + kbdev->ipa.fallback_model = default_model; + err = of_property_read_string(kbdev->dev->of_node, + "ipa-model", + &model_name); + if (err) { + /* Attempt to load a match from GPU-ID */ + u32 gpu_id; + + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + model_name = kbase_ipa_model_name_from_id(gpu_id); + dev_dbg(kbdev->dev, + "Inferring model from GPU ID 0x%x: \'%s\'\n", + gpu_id, model_name); + err = 0; + } else { + dev_dbg(kbdev->dev, + "Using ipa-model parameter from DT: \'%s\'\n", + model_name); + } + + if (strcmp(KBASE_IPA_FALLBACK_MODEL_NAME, model_name) != 0) { + ops = kbase_ipa_model_ops_find(kbdev, model_name); + kbdev->ipa.configured_model = kbase_ipa_init_model(kbdev, ops); + if (!kbdev->ipa.configured_model) { + err = -EINVAL; + goto end; + } + } else { + kbdev->ipa.configured_model = default_model; + } + + kbase_ipa_model_use_configured_locked(kbdev); + +end: + if (err) + kbase_ipa_term_locked(kbdev); + else + dev_info(kbdev->dev, + "Using configured power model %s, and fallback %s\n", + kbdev->ipa.configured_model->ops->name, + kbdev->ipa.fallback_model->ops->name); + + mutex_unlock(&kbdev->ipa.lock); + return err; +} +KBASE_EXPORT_TEST_API(kbase_ipa_init); + +void kbase_ipa_term(struct kbase_device *kbdev) +{ + mutex_lock(&kbdev->ipa.lock); + kbase_ipa_term_locked(kbdev); + mutex_unlock(&kbdev->ipa.lock); +} +KBASE_EXPORT_TEST_API(kbase_ipa_term); + +/** + * kbase_scale_dynamic_power() - Scale a dynamic power coefficient to an OPP + * @c: Dynamic model coefficient, in pW/(Hz V^2). Should be in range + * 0 < c < 2^26 to prevent overflow. + * @freq: Frequency, in Hz. Range: 2^23 < freq < 2^30 (~8MHz to ~1GHz) + * @voltage: Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V) + * + * Keep a record of the approximate range of each value at every stage of the + * calculation, to ensure we don't overflow. This makes heavy use of the + * approximations 1000 = 2^10 and 1000000 = 2^20, but does the actual + * calculations in decimal for increased accuracy. + * + * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W) + */ +static u32 kbase_scale_dynamic_power(const u32 c, const u32 freq, + const u32 voltage) +{ + /* Range: 2^8 < v2 < 2^16 m(V^2) */ + const u32 v2 = (voltage * voltage) / 1000; + + /* Range: 2^3 < f_MHz < 2^10 MHz */ + const u32 f_MHz = freq / 1000000; + + /* Range: 2^11 < v2f_big < 2^26 kHz V^2 */ + const u32 v2f_big = v2 * f_MHz; + + /* Range: 2^1 < v2f < 2^16 MHz V^2 */ + const u32 v2f = v2f_big / 1000; + + /* Range (working backwards from next line): 0 < v2fc < 2^23 uW. + * Must be < 2^42 to avoid overflowing the return value. */ + const u64 v2fc = (u64) c * (u64) v2f; + + /* Range: 0 < v2fc / 1000 < 2^13 mW */ + return div_u64(v2fc, 1000); +} + +/** + * kbase_scale_static_power() - Scale a static power coefficient to an OPP + * @c: Static model coefficient, in uW/V^3. Should be in range + * 0 < c < 2^32 to prevent overflow. + * @voltage: Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V) + * + * Return: Power consumption, in mW. 
Range: 0 < p < 2^13 (0W to ~8W) + */ +u32 kbase_scale_static_power(const u32 c, const u32 voltage) +{ + /* Range: 2^8 < v2 < 2^16 m(V^2) */ + const u32 v2 = (voltage * voltage) / 1000; + + /* Range: 2^17 < v3_big < 2^29 m(V^2) mV */ + const u32 v3_big = v2 * voltage; + + /* Range: 2^7 < v3 < 2^19 m(V^3) */ + const u32 v3 = v3_big / 1000; + + /* + * Range (working backwards from next line): 0 < v3c_big < 2^33 nW. + * The result should be < 2^52 to avoid overflowing the return value. + */ + const u64 v3c_big = (u64) c * (u64) v3; + + /* Range: 0 < v3c_big / 1000000 < 2^13 mW */ + return div_u64(v3c_big, 1000000); +} + +static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->ipa.lock); + + if (atomic_read(&kbdev->ipa_use_configured_model)) + return kbdev->ipa.configured_model; + else + return kbdev->ipa.fallback_model; +} + +static u32 get_static_power_locked(struct kbase_device *kbdev, + struct kbase_ipa_model *model, + unsigned long voltage) +{ + u32 power = 0; + int err; + u32 power_coeff; + + lockdep_assert_held(&model->kbdev->ipa.lock); + + if (!model->ops->get_static_coeff) + model = kbdev->ipa.fallback_model; + + if (model->ops->get_static_coeff) { + err = model->ops->get_static_coeff(model, &power_coeff); + if (!err) + power = kbase_scale_static_power(power_coeff, + (u32) voltage); + } + + return power; +} + +#if defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +static unsigned long kbase_get_static_power(struct devfreq *df, + unsigned long voltage) +#else +static unsigned long kbase_get_static_power(unsigned long voltage) +#endif +{ + struct kbase_ipa_model *model; + u32 power = 0; +#if defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + struct kbase_device *kbdev = dev_get_drvdata(&df->dev); +#else + struct kbase_device *kbdev = kbase_find_device(-1); +#endif + + mutex_lock(&kbdev->ipa.lock); + + model = get_current_model(kbdev); + power = get_static_power_locked(kbdev, model, voltage); + + mutex_unlock(&kbdev->ipa.lock); + +#if !(defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + kbase_release_device(kbdev); +#endif + + return power; +} + +#if defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +static unsigned long kbase_get_dynamic_power(struct devfreq *df, + unsigned long freq, + unsigned long voltage) +#else +static unsigned long kbase_get_dynamic_power(unsigned long freq, + unsigned long voltage) +#endif +{ + struct kbase_ipa_model *model; + u32 power_coeff = 0, power = 0; + int err = 0; +#if defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + struct kbase_device *kbdev = dev_get_drvdata(&df->dev); +#else + struct kbase_device *kbdev = kbase_find_device(-1); +#endif + + mutex_lock(&kbdev->ipa.lock); + + model = kbdev->ipa.fallback_model; + + err = model->ops->get_dynamic_coeff(model, &power_coeff, freq); + + if (!err) + power = kbase_scale_dynamic_power(power_coeff, freq, voltage); + else + dev_err_ratelimited(kbdev->dev, + "Model %s returned error code %d\n", + model->ops->name, err); + + mutex_unlock(&kbdev->ipa.lock); + +#if !(defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + kbase_release_device(kbdev); +#endif + + return power; +} + +int kbase_get_real_power(struct devfreq *df, u32 *power, + unsigned long freq, + unsigned long voltage) +{ + struct kbase_ipa_model *model; + u32 power_coeff = 0; + 
int err = 0; + struct kbase_device *kbdev = dev_get_drvdata(&df->dev); + + mutex_lock(&kbdev->ipa.lock); + + model = get_current_model(kbdev); + + err = model->ops->get_dynamic_coeff(model, &power_coeff, freq); + + /* If we switch to protected model between get_current_model() and + * get_dynamic_coeff(), counter reading could fail. If that happens + * (unlikely, but possible), revert to the fallback model. */ + if (err && model != kbdev->ipa.fallback_model) { + model = kbdev->ipa.fallback_model; + err = model->ops->get_dynamic_coeff(model, &power_coeff, freq); + } + + if (err) + goto exit_unlock; + + *power = kbase_scale_dynamic_power(power_coeff, freq, voltage); + + if (model->ops->do_utilization_scaling_in_framework) { + struct devfreq_dev_status *status = &df->last_status; + unsigned long total_time = max(status->total_time, 1ul); + u64 busy_time = min(status->busy_time, total_time); + + *power = div_u64((u64) *power * (u64) busy_time, total_time); + } + + *power += get_static_power_locked(kbdev, model, voltage); + +exit_unlock: + mutex_unlock(&kbdev->ipa.lock); + + return err; +} +KBASE_EXPORT_TEST_API(kbase_get_real_power); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +struct devfreq_cooling_ops kbase_ipa_power_model_ops = { +#else +struct devfreq_cooling_power kbase_ipa_power_model_ops = { +#endif + .get_static_power = &kbase_get_static_power, + .get_dynamic_power = &kbase_get_dynamic_power, +#if defined(CONFIG_MALI_PWRSOFT_765) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) + .get_real_power = &kbase_get_real_power, +#endif +}; +KBASE_EXPORT_TEST_API(kbase_ipa_power_model_ops); diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h new file mode 100644 index 00000000000000..469f33cbdcc6fa --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h @@ -0,0 +1,165 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_IPA_H_ +#define _KBASE_IPA_H_ + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + +struct devfreq; + +struct kbase_ipa_model { + struct list_head link; + struct kbase_device *kbdev; + void *model_data; + struct kbase_ipa_model_ops *ops; + struct list_head params; + bool missing_dt_node_warning; +}; + +/** + * kbase_ipa_model_add_param_s32 - Add an integer model parameter + * @model: pointer to IPA model + * @name: name of corresponding debugfs entry + * @addr: address where the value is stored + * @num_elems: number of elements (1 if not an array) + * @dt_required: if false, a corresponding devicetree entry is not required, + * and the current value will be used. 
If true, a warning is + * output and the data is zeroed + * + * Return: 0 on success, or an error code + */ +int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model, + const char *name, s32 *addr, + size_t num_elems, bool dt_required); + +/** + * kbase_ipa_model_add_param_string - Add a string model parameter + * @model: pointer to IPA model + * @name: name of corresponding debugfs entry + * @addr: address where the value is stored + * @size: size, in bytes, of the value storage (so the maximum string + * length is size - 1) + * @dt_required: if false, a corresponding devicetree entry is not required, + * and the current value will be used. If true, a warning is + * output and the data is zeroed + * + * Return: 0 on success, or an error code + */ +int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model, + const char *name, char *addr, + size_t size, bool dt_required); + +struct kbase_ipa_model_ops { + char *name; + /* The init, recalculate and term ops on the default model are always + * called. However, all the other models are only invoked if the model + * is selected in the device tree. Otherwise they are never + * initialized. Additional resources can be acquired by models in + * init(), however they must be terminated in the term(). + */ + int (*init)(struct kbase_ipa_model *model); + /* Called immediately after init(), or when a parameter is changed, so + * that any coefficients derived from model parameters can be + * recalculated. */ + int (*recalculate)(struct kbase_ipa_model *model); + void (*term)(struct kbase_ipa_model *model); + /* + * get_dynamic_coeff() - calculate dynamic power coefficient + * @model: pointer to model + * @coeffp: pointer to return value location + * @current_freq: frequency the GPU has been running at for the + * previous sampling period. + * + * Calculate a dynamic power coefficient, with units pW/(Hz V^2), which + * is then scaled by the IPA framework according to the current OPP's + * frequency and voltage. + * + * Return: 0 on success, or an error code. + */ + int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp, + u32 current_freq); + /* + * get_static_coeff() - calculate static power coefficient + * @model: pointer to model + * @coeffp: pointer to return value location + * + * Calculate a static power coefficient, with units uW/(V^3), which is + * scaled by the IPA framework according to the current OPP's voltage. + * + * Return: 0 on success, or an error code. + */ + int (*get_static_coeff)(struct kbase_ipa_model *model, u32 *coeffp); + /* If false, the model's get_dynamic_coeff() method accounts for how + * long the GPU was active over the sample period. If true, the + * framework will scale the calculated power according to the + * utilization stats recorded by devfreq in get_real_power(). 
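+	 * (The framework path is kbase_get_real_power(), which scales the
+	 * calculated dynamic power by devfreq's busy_time / total_time ratio
+	 * when this flag is set.)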
*/ + bool do_utilization_scaling_in_framework; +}; + +/* Models can be registered only in the platform's platform_init_func call */ +int kbase_ipa_model_ops_register(struct kbase_device *kbdev, + struct kbase_ipa_model_ops *new_model_ops); +struct kbase_ipa_model *kbase_ipa_get_model(struct kbase_device *kbdev, + const char *name); + +int kbase_ipa_init(struct kbase_device *kbdev); +void kbase_ipa_term(struct kbase_device *kbdev); +void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev); +void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev); +int kbase_ipa_model_recalculate(struct kbase_ipa_model *model); +struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev, + struct kbase_ipa_model_ops *ops); +void kbase_ipa_term_model(struct kbase_ipa_model *model); + +extern struct kbase_ipa_model_ops kbase_g71_ipa_model_ops; + +#if MALI_UNIT_TEST +/** + * kbase_get_real_power() - get the real power consumption of the GPU + * @df: dynamic voltage and frequency scaling information for the GPU. + * @power: where to store the power consumption, in mW. + * @freq: a frequency, in HZ. + * @voltage: a voltage, in mV. + * + * This function is only exposed for use by unit tests. The returned value + * incorporates both static and dynamic power consumption. + * + * Return: 0 on success, or an error code. + */ +int kbase_get_real_power(struct devfreq *df, u32 *power, + unsigned long freq, + unsigned long voltage); +#endif /* MALI_UNIT_TEST */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +extern struct devfreq_cooling_ops kbase_ipa_power_model_ops; +#else +extern struct devfreq_cooling_power kbase_ipa_power_model_ops; +#endif + +#else /* !(defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */ + +static inline void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev) +{ } + +static inline void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev) +{ } + +#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */ + +#endif diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c new file mode 100644 index 00000000000000..d3ac7c3ce1a3aa --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c @@ -0,0 +1,249 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include +#include +#include + +#include "mali_kbase.h" +#include "mali_kbase_ipa.h" +#include "mali_kbase_ipa_debugfs.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#define DEFINE_DEBUGFS_ATTRIBUTE DEFINE_SIMPLE_ATTRIBUTE +#endif + +struct kbase_ipa_model_param { + char *name; + union { + void *voidp; + s32 *s32p; + char *str; + } addr; + size_t size; + enum kbase_ipa_model_param_type type; + struct kbase_ipa_model *model; + struct list_head link; +}; + +static int param_int_get(void *data, u64 *val) +{ + struct kbase_ipa_model_param *param = data; + + mutex_lock(¶m->model->kbdev->ipa.lock); + *(s64 *) val = *param->addr.s32p; + mutex_unlock(¶m->model->kbdev->ipa.lock); + + return 0; +} + +static int param_int_set(void *data, u64 val) +{ + struct kbase_ipa_model_param *param = data; + struct kbase_ipa_model *model = param->model; + s64 sval = (s64) val; + int err = 0; + + if (sval < S32_MIN || sval > S32_MAX) + return -ERANGE; + + mutex_lock(¶m->model->kbdev->ipa.lock); + *param->addr.s32p = val; + err = kbase_ipa_model_recalculate(model); + mutex_unlock(¶m->model->kbdev->ipa.lock); + + return err; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_s32, param_int_get, param_int_set, "%lld\n"); + +static ssize_t param_string_get(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct kbase_ipa_model_param *param = file->private_data; + ssize_t ret; + size_t len; + + mutex_lock(¶m->model->kbdev->ipa.lock); + len = strnlen(param->addr.str, param->size - 1) + 1; + ret = simple_read_from_buffer(user_buf, count, ppos, + param->addr.str, len); + mutex_unlock(¶m->model->kbdev->ipa.lock); + + return ret; +} + +static ssize_t param_string_set(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct kbase_ipa_model_param *param = file->private_data; + struct kbase_ipa_model *model = param->model; + ssize_t ret = count; + size_t buf_size; + int err; + + mutex_lock(&model->kbdev->ipa.lock); + + if (count > param->size) { + ret = -EINVAL; + goto end; + } + + buf_size = min(param->size - 1, count); + if (copy_from_user(param->addr.str, user_buf, buf_size)) { + ret = -EFAULT; + goto end; + } + + param->addr.str[buf_size] = '\0'; + + err = kbase_ipa_model_recalculate(model); + if (err < 0) + ret = err; + +end: + mutex_unlock(&model->kbdev->ipa.lock); + + return ret; +} + +static const struct file_operations fops_string = { + .read = param_string_get, + .write = param_string_set, + .open = simple_open, + .llseek = default_llseek, +}; + +int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name, + void *addr, size_t size, + enum kbase_ipa_model_param_type type) +{ + struct kbase_ipa_model_param *param; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + + if (!param) + return -ENOMEM; + + /* 'name' is stack-allocated for array elements, so copy it into + * heap-allocated storage */ + param->name = kstrdup(name, GFP_KERNEL); + + if (!param->name) { + kfree(param); + return -ENOMEM; + } + + param->addr.voidp = addr; + param->size = size; + param->type = type; + param->model = model; + + list_add(¶m->link, &model->params); + + return 0; +} + +void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model) +{ + struct kbase_ipa_model_param *param_p, *param_n; + + list_for_each_entry_safe(param_p, param_n, &model->params, link) { + list_del(¶m_p->link); + kfree(param_p->name); + kfree(param_p); + } +} + +static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model) +{ + struct list_head *it; + struct dentry 
*dir; + + lockdep_assert_held(&model->kbdev->ipa.lock); + + dir = debugfs_create_dir(model->ops->name, + model->kbdev->mali_debugfs_directory); + + if (!dir) { + dev_err(model->kbdev->dev, + "Couldn't create mali debugfs %s directory", + model->ops->name); + return; + } + + list_for_each(it, &model->params) { + struct kbase_ipa_model_param *param = + list_entry(it, + struct kbase_ipa_model_param, + link); + const struct file_operations *fops = NULL; + + switch (param->type) { + case PARAM_TYPE_S32: + fops = &fops_s32; + break; + case PARAM_TYPE_STRING: + fops = &fops_string; + break; + } + + if (unlikely(!fops)) { + dev_err(model->kbdev->dev, + "Type not set for %s parameter %s\n", + model->ops->name, param->name); + } else { + debugfs_create_file(param->name, S_IRUGO | S_IWUSR, + dir, param, fops); + } + } +} + +void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model, + const char *name, s32 val) +{ + struct kbase_ipa_model_param *param; + + mutex_lock(&model->kbdev->ipa.lock); + + list_for_each_entry(param, &model->params, link) { + if (!strcmp(param->name, name)) { + if (param->type == PARAM_TYPE_S32) { + *param->addr.s32p = val; + } else { + dev_err(model->kbdev->dev, + "Wrong type for %s parameter %s\n", + model->ops->name, param->name); + } + break; + } + } + + mutex_unlock(&model->kbdev->ipa.lock); +} +KBASE_EXPORT_TEST_API(kbase_ipa_model_param_set_s32); + +void kbase_ipa_debugfs_init(struct kbase_device *kbdev) +{ + mutex_lock(&kbdev->ipa.lock); + + if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model) + kbase_ipa_model_debugfs_init(kbdev->ipa.configured_model); + kbase_ipa_model_debugfs_init(kbdev->ipa.fallback_model); + + mutex_unlock(&kbdev->ipa.lock); +} diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h new file mode 100644 index 00000000000000..f624de9d30171c --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h @@ -0,0 +1,63 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_IPA_DEBUGFS_H_ +#define _KBASE_IPA_DEBUGFS_H_ + +enum kbase_ipa_model_param_type { + PARAM_TYPE_S32 = 1, + PARAM_TYPE_STRING, +}; + +#ifdef CONFIG_DEBUG_FS + +void kbase_ipa_debugfs_init(struct kbase_device *kbdev); +int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name, + void *addr, size_t size, + enum kbase_ipa_model_param_type type); +void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model); + +/** + * kbase_ipa_model_param_set_s32 - Set an integer model parameter + * + * @model: pointer to IPA model + * @name: name of corresponding debugfs entry + * @val: new value of the parameter + * + * This function is only exposed for use by unit tests running in + * kernel space. Normally it is expected that parameter values will + * instead be set via debugfs. 
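+ *
+ * A minimal illustrative call (the "static-coefficient" name is the
+ * parameter registered by the simple power model; other models register
+ * their own parameter names, and the value shown is arbitrary):
+ *
+ *   kbase_ipa_model_param_set_s32(model, "static-coefficient", 100);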
+ */ +void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model, + const char *name, s32 val); + +#else /* CONFIG_DEBUG_FS */ + +static inline int kbase_ipa_model_param_add(struct kbase_ipa_model *model, + const char *name, void *addr, + size_t size, + enum kbase_ipa_model_param_type type) +{ + return 0; +} + +static inline void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model) +{ } + +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _KBASE_IPA_DEBUGFS_H_ */ diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c new file mode 100644 index 00000000000000..70e08b3d1486fb --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c @@ -0,0 +1,323 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#ifdef CONFIG_DEVFREQ_THERMAL +#include +#endif +#include +#include +#include + +#include "mali_kbase.h" +#include "mali_kbase_defs.h" +#include "mali_kbase_ipa_simple.h" +#include "mali_kbase_ipa_debugfs.h" + +#if MALI_UNIT_TEST + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +static unsigned long dummy_temp; + +static int kbase_simple_power_model_get_dummy_temp( + struct thermal_zone_device *tz, + unsigned long *temp) +{ + *temp = ACCESS_ONCE(dummy_temp); + return 0; +} + +#else +static int dummy_temp; + +static int kbase_simple_power_model_get_dummy_temp( + struct thermal_zone_device *tz, + int *temp) +{ + *temp = ACCESS_ONCE(dummy_temp); + return 0; +} +#endif + +/* Intercept calls to the kernel function using a macro */ +#ifdef thermal_zone_get_temp +#undef thermal_zone_get_temp +#endif +#define thermal_zone_get_temp(tz, temp) \ + kbase_simple_power_model_get_dummy_temp(tz, temp) + +void kbase_simple_power_model_set_dummy_temp(int temp) +{ + ACCESS_ONCE(dummy_temp) = temp; +} +KBASE_EXPORT_TEST_API(kbase_simple_power_model_set_dummy_temp); + +#endif /* MALI_UNIT_TEST */ + +/* + * This model is primarily designed for the Juno platform. It may not be + * suitable for other platforms. The additional resources in this model + * should preferably be minimal, as this model is rarely used when a dynamic + * model is available. 
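+ *
+ * The model reports a fixed dynamic power coefficient and a static power
+ * coefficient scaled by a temperature-dependent factor (see
+ * model_dynamic_coeff() and model_static_coeff() below).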
+ */ + +/** + * struct kbase_ipa_model_simple_data - IPA context per device + * @dynamic_coefficient: dynamic coefficient of the model + * @static_coefficient: static coefficient of the model + * @ts: Thermal scaling coefficients of the model + * @tz_name: Thermal zone name + * @gpu_tz: thermal zone device + * @poll_temperature_thread: Handle for temperature polling thread + * @current_temperature: Most recent value of polled temperature + * @temperature_poll_interval_ms: How often temperature should be checked, in ms + */ + +struct kbase_ipa_model_simple_data { + u32 dynamic_coefficient; + u32 static_coefficient; + s32 ts[4]; + char tz_name[16]; + struct thermal_zone_device *gpu_tz; + struct task_struct *poll_temperature_thread; + int current_temperature; + int temperature_poll_interval_ms; +}; +#define FALLBACK_STATIC_TEMPERATURE 55000 + +/** + * calculate_temp_scaling_factor() - Calculate temperature scaling coefficient + * @ts: Signed coefficients, in order t^0 to t^3, with units Deg^-N + * @t: Temperature, in mDeg C. Range: -2^17 < t < 2^17 + * + * Scale the temperature according to a cubic polynomial whose coefficients are + * provided in the device tree. The result is used to scale the static power + * coefficient, where 1000000 means no change. + * + * Return: Temperature scaling factor. Range 0 <= ret <= 10,000,000. + */ +static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t) +{ + /* Range: -2^24 < t2 < 2^24 m(Deg^2) */ + const s64 t2 = div_s64((t * t), 1000); + + /* Range: -2^31 < t3 < 2^31 m(Deg^3) */ + const s64 t3 = div_s64((t * t2), 1000); + + /* + * Sum the parts. t^[1-3] are in m(Deg^N), but the coefficients are in + * Deg^-N, so we need to multiply the last coefficient by 1000. + * Range: -2^63 < res_big < 2^63 + */ + const s64 res_big = ts[3] * t3 /* +/- 2^62 */ + + ts[2] * t2 /* +/- 2^55 */ + + ts[1] * t /* +/- 2^48 */ + + ts[0] * 1000; /* +/- 2^41 */ + + /* Range: -2^60 < res_unclamped < 2^60 */ + s64 res_unclamped = div_s64(res_big, 1000); + + /* Clamp to range of 0x to 10x the static power */ + return clamp(res_unclamped, (s64) 0, (s64) 10000000); +} + +/* We can't call thermal_zone_get_temp() directly in model_static_coeff(), + * because we don't know if tz->lock is held in the same thread. So poll it in + * a separate thread to get around this. 
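+ * The polling thread stores the most recent reading in
+ * model_data->current_temperature, which model_static_coeff() then reads.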
*/ +static int poll_temperature(void *data) +{ + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *) data; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) + unsigned long temp; +#else + int temp; +#endif + + while (!kthread_should_stop()) { + struct thermal_zone_device *tz = ACCESS_ONCE(model_data->gpu_tz); + + if (tz) { + int ret; + + ret = thermal_zone_get_temp(tz, &temp); + if (ret) { + pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n", + ret); + temp = FALLBACK_STATIC_TEMPERATURE; + } + } else { + temp = FALLBACK_STATIC_TEMPERATURE; + } + + ACCESS_ONCE(model_data->current_temperature) = temp; + + msleep_interruptible(ACCESS_ONCE(model_data->temperature_poll_interval_ms)); + } + + return 0; +} + +static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp) +{ + u32 temp_scaling_factor; + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *) model->model_data; + u64 coeff_big; + int temp; + + temp = ACCESS_ONCE(model_data->current_temperature); + + /* Range: 0 <= temp_scaling_factor < 2^24 */ + temp_scaling_factor = calculate_temp_scaling_factor(model_data->ts, + temp); + + /* + * Range: 0 <= coeff_big < 2^52 to avoid overflowing *coeffp. This + * means static_coefficient must be in range + * 0 <= static_coefficient < 2^28. + */ + coeff_big = (u64) model_data->static_coefficient * (u64) temp_scaling_factor; + *coeffp = div_u64(coeff_big, 1000000); + + return 0; +} + +static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp, + u32 current_freq) +{ + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *) model->model_data; + + *coeffp = model_data->dynamic_coefficient; + + return 0; +} + +static int add_params(struct kbase_ipa_model *model) +{ + int err = 0; + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *)model->model_data; + + err = kbase_ipa_model_add_param_s32(model, "static-coefficient", + &model_data->static_coefficient, + 1, true); + if (err) + goto end; + + err = kbase_ipa_model_add_param_s32(model, "dynamic-coefficient", + &model_data->dynamic_coefficient, + 1, true); + if (err) + goto end; + + err = kbase_ipa_model_add_param_s32(model, "ts", + model_data->ts, 4, true); + if (err) + goto end; + + err = kbase_ipa_model_add_param_string(model, "thermal-zone", + model_data->tz_name, + sizeof(model_data->tz_name), true); + if (err) + goto end; + + model_data->temperature_poll_interval_ms = 200; + err = kbase_ipa_model_add_param_s32(model, "temp-poll-interval-ms", + &model_data->temperature_poll_interval_ms, + 1, false); + +end: + return err; +} + +static int kbase_simple_power_model_init(struct kbase_ipa_model *model) +{ + int err; + struct kbase_ipa_model_simple_data *model_data; + + model_data = kzalloc(sizeof(struct kbase_ipa_model_simple_data), + GFP_KERNEL); + if (!model_data) + return -ENOMEM; + + model->model_data = (void *) model_data; + + model_data->current_temperature = FALLBACK_STATIC_TEMPERATURE; + model_data->poll_temperature_thread = kthread_run(poll_temperature, + (void *) model_data, + "mali-simple-power-model-temp-poll"); + if (IS_ERR(model_data->poll_temperature_thread)) { + kfree(model_data); + return PTR_ERR(model_data->poll_temperature_thread); + } + + err = add_params(model); + if (err) { + kbase_ipa_model_param_free_all(model); + kthread_stop(model_data->poll_temperature_thread); + kfree(model_data); + } + + return err; +} + +static int 
kbase_simple_power_model_recalculate(struct kbase_ipa_model *model) +{ + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *)model->model_data; + struct thermal_zone_device *tz; + + if (!strnlen(model_data->tz_name, sizeof(model_data->tz_name))) { + tz = NULL; + } else { + tz = thermal_zone_get_zone_by_name(model_data->tz_name); + + if (IS_ERR_OR_NULL(tz)) { + pr_warn_ratelimited("Error %ld getting thermal zone \'%s\', not yet ready?\n", + PTR_ERR(tz), model_data->tz_name); + tz = NULL; + return -EPROBE_DEFER; + } + } + + ACCESS_ONCE(model_data->gpu_tz) = tz; + + return 0; +} + +static void kbase_simple_power_model_term(struct kbase_ipa_model *model) +{ + struct kbase_ipa_model_simple_data *model_data = + (struct kbase_ipa_model_simple_data *)model->model_data; + + kthread_stop(model_data->poll_temperature_thread); + + kfree(model_data); +} + +struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = { + .name = "mali-simple-power-model", + .init = &kbase_simple_power_model_init, + .recalculate = &kbase_simple_power_model_recalculate, + .term = &kbase_simple_power_model_term, + .get_dynamic_coeff = &model_dynamic_coeff, + .get_static_coeff = &model_static_coeff, + .do_utilization_scaling_in_framework = true, +}; +KBASE_EXPORT_TEST_API(kbase_simple_ipa_model_ops); diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h new file mode 100644 index 00000000000000..e78d6173300bd8 --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h @@ -0,0 +1,40 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_IPA_SIMPLE_H_ +#define _KBASE_IPA_SIMPLE_H_ + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + +extern struct kbase_ipa_model_ops kbase_simple_ipa_model_ops; + +#if MALI_UNIT_TEST +/** + * kbase_simple_power_model_set_dummy_temp() - set a dummy temperature value + * @temp: Temperature of the thermal zone, in millidegrees celsius. + * + * This is only intended for use in unit tests, to ensure that the temperature + * values used by the simple power model are predictable. Deterministic + * behavior is necessary to allow validation of the static power values + * computed by this model. + */ +void kbase_simple_power_model_set_dummy_temp(int temp); +#endif /* MALI_UNIT_TEST */ + +#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */ + +#endif /* _KBASE_IPA_SIMPLE_H_ */ diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c new file mode 100644 index 00000000000000..9b9fa0e08ee428 --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c @@ -0,0 +1,229 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include "mali_kbase_ipa_vinstr_common.h" + +#if MALI_UNIT_TEST +static ktime_t dummy_time; + +/* Intercept calls to the kernel function using a macro */ +#ifdef ktime_get +#undef ktime_get +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) +#define ktime_get() (ACCESS_ONCE(dummy_time)) + +void kbase_ipa_set_dummy_time(ktime_t t) +{ + ACCESS_ONCE(dummy_time) = t; +} +KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time); +#else +#define ktime_get() (READ_ONCE(dummy_time)) + +void kbase_ipa_set_dummy_time(ktime_t t) +{ + WRITE_ONCE(dummy_time, t); +} +KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time); + +#endif + +#endif /* MALI_UNIT_TEST */ + +/** + * read_hwcnt() - read a counter value + * @model_data: pointer to model data + * @offset: offset, in bytes, into vinstr buffer + * + * Return: A 32-bit counter value. Range: 0 < value < 2^27 (worst case would be + * incrementing every cycle over a ~100ms sample period at a high frequency, + * e.g. 1 GHz: 2^30 * 0.1seconds ~= 2^27. + */ +static inline u32 kbase_ipa_read_hwcnt( + struct kbase_ipa_model_vinstr_data *model_data, + u32 offset) +{ + u8 *p = model_data->vinstr_buffer; + + return *(u32 *)&p[offset]; +} + +static inline s64 kbase_ipa_add_saturate(s64 a, s64 b) +{ + if (S64_MAX - a < b) + return S64_MAX; + return a + b; +} + +s64 kbase_ipa_sum_all_shader_cores( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, u32 counter) +{ + struct kbase_device *kbdev = model_data->kbdev; + u64 core_mask; + u32 base = 0; + s64 ret = 0; + + core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask; + while (core_mask != 0ull) { + if ((core_mask & 1ull) != 0ull) { + /* 0 < counter_value < 2^27 */ + u32 counter_value = kbase_ipa_read_hwcnt(model_data, + base + counter); + + /* 0 < ret < 2^27 * max_num_cores = 2^32 */ + ret = kbase_ipa_add_saturate(ret, counter_value); + } + base += KBASE_IPA_NR_BYTES_PER_BLOCK; + core_mask >>= 1; + } + + /* Range: -2^54 < ret < 2^54 */ + ret *= coeff; + + return div_s64(ret, 1000000); +} + +s64 kbase_ipa_single_counter( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, u32 counter) +{ + /* Range: 0 < counter_value < 2^27 */ + const u32 counter_value = kbase_ipa_read_hwcnt(model_data, counter); + + /* Range: -2^49 < ret < 2^49 */ + const s64 multiplied = (s64) counter_value * (s64) coeff; + + /* Range: -2^29 < return < 2^29 */ + return div_s64(multiplied, 1000000); +} + +int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data) +{ + struct kbase_device *kbdev = model_data->kbdev; + struct kbase_uk_hwcnt_reader_setup setup; + size_t dump_size; + + dump_size = kbase_vinstr_dump_size(kbdev); + model_data->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL); + if (!model_data->vinstr_buffer) { + dev_err(kbdev->dev, "Failed to allocate IPA dump buffer"); + return -1; + } + + setup.jm_bm = ~0u; + setup.shader_bm = ~0u; + setup.tiler_bm = ~0u; + setup.mmu_l2_bm = ~0u; + model_data->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(kbdev->vinstr_ctx, + &setup, model_data->vinstr_buffer); + if (!model_data->vinstr_cli) { + 
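+		/* Attaching to vinstr failed: release the dump buffer and
+		 * report the failure to the caller. */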
dev_err(kbdev->dev, "Failed to register IPA with vinstr core"); + kfree(model_data->vinstr_buffer); + model_data->vinstr_buffer = NULL; + return -1; + } + + model_data->last_sample_read_time = ktime_get(); + kbase_vinstr_hwc_clear(model_data->vinstr_cli); + + return 0; +} + +void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data) +{ + if (model_data->vinstr_cli) + kbase_vinstr_detach_client(model_data->vinstr_cli); + model_data->vinstr_cli = NULL; + kfree(model_data->vinstr_buffer); + model_data->vinstr_buffer = NULL; +} + +int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp, + u32 current_freq) +{ + struct kbase_ipa_model_vinstr_data *model_data = + (struct kbase_ipa_model_vinstr_data *)model->model_data; + s64 energy = 0; + size_t i; + ktime_t now = ktime_get(); + ktime_t time_since_last_sample = + ktime_sub(now, model_data->last_sample_read_time); + /* Range: 2^0 < time_since_last_sample_ms < 2^10 (1-1000ms) */ + s64 time_since_last_sample_ms = ktime_to_ms(time_since_last_sample); + u64 coeff = 0; + u64 num_cycles; + int err = 0; + + err = kbase_vinstr_hwc_dump(model_data->vinstr_cli, + BASE_HWCNT_READER_EVENT_MANUAL); + if (err) + goto err0; + + model_data->last_sample_read_time = now; + + /* Range of 'energy' is +/- 2^34 * number of IPA groups, so around + * -2^38 < energy < 2^38 */ + for (i = 0; i < model_data->groups_def_num; i++) { + const struct kbase_ipa_group *group = &model_data->groups_def[i]; + s32 coeff, group_energy; + + coeff = model_data->group_values[i]; + group_energy = group->op(model_data, coeff, group->counter_block_offset); + + energy = kbase_ipa_add_saturate(energy, group_energy); + } + + /* Range: 0 <= coeff < 2^38 */ + if (energy > 0) + coeff = energy; + + /* Scale by user-specified factor and divide by 1000. But actually + * cancel the division out, because we want the num_cycles in KHz and + * don't want to lose precision. */ + + /* Range: 0 < coeff < 2^53 */ + coeff = coeff * model_data->scaling_factor; + + if (time_since_last_sample_ms == 0) { + time_since_last_sample_ms = 1; + } else if (time_since_last_sample_ms < 0) { + err = -ERANGE; + goto err0; + } + + /* Range: 2^20 < num_cycles < 2^40 mCycles */ + num_cycles = (u64) current_freq * (u64) time_since_last_sample_ms; + /* Range: 2^10 < num_cycles < 2^30 Cycles */ + num_cycles = div_u64(num_cycles, 1000000); + + /* num_cycles should never be 0 in _normal_ usage (because we expect + * frequencies on the order of MHz and >10ms polling intervals), but + * protect against divide-by-zero anyway. */ + if (num_cycles == 0) + num_cycles = 1; + + /* Range: 0 < coeff < 2^43 */ + coeff = div_u64(coeff, num_cycles); + +err0: + /* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */ + *coeffp = clamp(coeff, (u64) 0, (u64) 1 << 16); + return err; +} diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h new file mode 100644 index 00000000000000..d212c875951ba6 --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h @@ -0,0 +1,163 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_IPA_VINSTR_COMMON_H_ +#define _KBASE_IPA_VINSTR_COMMON_H_ + +#include "mali_kbase.h" + +/* Maximum length for the name of an IPA group. */ +#define KBASE_IPA_MAX_GROUP_NAME_LEN 15 + +/* Maximum number of IPA groups for an IPA model. */ +#define KBASE_IPA_MAX_GROUP_DEF_NUM 16 + +/* Number of bytes per hardware counter in a vinstr_buffer. */ +#define KBASE_IPA_NR_BYTES_PER_CNT 4 + +/* Number of hardware counters per block in a vinstr_buffer. */ +#define KBASE_IPA_NR_CNT_PER_BLOCK 64 + +/* Number of bytes per block in a vinstr_buffer. */ +#define KBASE_IPA_NR_BYTES_PER_BLOCK \ + (KBASE_IPA_NR_CNT_PER_BLOCK * KBASE_IPA_NR_BYTES_PER_CNT) + + + +/** + * struct kbase_ipa_model_vinstr_data - IPA context per device + * @kbdev: pointer to kbase device + * @groups_def: Array of IPA groups. + * @groups_def_num: Number of elements in the array of IPA groups. + * @vinstr_cli: vinstr client handle + * @vinstr_buffer: buffer to dump hardware counters onto + * @last_sample_read_time: timestamp of last vinstr buffer read + * @scaling_factor: user-specified power scaling factor. This is + * interpreted as a fraction where the denominator is + * 1000. Range approx 0.0-32.0: + * 0 < scaling_factor < 2^15 + */ +struct kbase_ipa_model_vinstr_data { + struct kbase_device *kbdev; + s32 group_values[KBASE_IPA_MAX_GROUP_DEF_NUM]; + const struct kbase_ipa_group *groups_def; + size_t groups_def_num; + struct kbase_vinstr_client *vinstr_cli; + void *vinstr_buffer; + ktime_t last_sample_read_time; + s32 scaling_factor; +}; + +/** + * struct ipa_group - represents a single IPA group + * @name: name of the IPA group + * @default_value: default value of coefficient for IPA group. + * Coefficients are interpreted as fractions where the + * denominator is 1000000. + * @op: which operation to be performed on the counter values + * @counter_block_offset: block offset in bytes of the counter used to calculate energy for IPA group + */ +struct kbase_ipa_group { + char name[KBASE_IPA_MAX_GROUP_NAME_LEN + 1]; + s32 default_value; + s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32); + u32 counter_block_offset; +}; + +/** + * sum_all_shader_cores() - sum a counter over all cores + * @model_data pointer to model data + * @coeff model coefficient. Unity is ~2^20, so range approx + * +/- 4.0: -2^22 < coeff < 2^22 + * @counter offset in bytes of the counter used to calculate energy for IPA group + * + * Calculate energy estimation based on hardware counter `counter' + * across all shader cores. + * + * Return: Sum of counter values. Range: -2^34 < ret < 2^34 + */ +s64 kbase_ipa_sum_all_shader_cores( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, u32 counter); + +/** + * sum_single_counter() - sum a single counter + * @model_data pointer to model data + * @coeff model coefficient. Unity is ~2^20, so range approx + * +/- 4.0: -2^22 < coeff < 2^22 + * @counter offset in bytes of the counter used to calculate energy for IPA group + * + * Calculate energy estimation based on hardware counter `counter'. + * + * Return: Counter value. Range: -2^34 < ret < 2^34 + */ +s64 kbase_ipa_single_counter( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, u32 counter); + +/** + * attach_vinstr() - attach a vinstr_buffer to an IPA model. 
+ * @model_data pointer to model data + * + * Attach a vinstr_buffer to an IPA model. The vinstr_buffer + * allows access to the hardware counters used to calculate + * energy consumption. + * + * Return: 0 on success, or an error code. + */ +int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data); + +/** + * detach_vinstr() - detach a vinstr_buffer from an IPA model. + * @model_data pointer to model data + * + * Detach a vinstr_buffer from an IPA model. + */ +void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data); + +/** + * kbase_ipa_vinstr_dynamic_coeff() - calculate dynamic power based on HW counters + * @model: pointer to instantiated model + * @coeffp: pointer to location where calculated power, in + * pW/(Hz V^2), is stored. + * @current_freq: frequency the GPU has been running at over the sample + * period. In Hz. Range: 10 MHz < 1GHz, + * 2^20 < current_freq < 2^30 + * + * This is a GPU-agnostic implementation of the get_dynamic_coeff() + * function of an IPA model. It relies on the model being populated + * with GPU-specific attributes at initialization time. + * + * Return: 0 on success, or an error code. + */ +int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp, + u32 current_freq); + +#if MALI_UNIT_TEST +/** + * kbase_ipa_set_dummy_time() - set a dummy monotonic time value + * @t: a monotonic time value + * + * This is only intended for use in unit tests, to ensure that the kernel time + * values used by a power model are predictable. Deterministic behavior is + * necessary to allow validation of the dynamic power values computed by the + * model. + */ +void kbase_ipa_set_dummy_time(ktime_t t); +#endif /* MALI_UNIT_TEST */ + +#endif /* _KBASE_IPA_VINSTR_COMMON_H_ */ diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c new file mode 100644 index 00000000000000..4e4c05928e388c --- /dev/null +++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c @@ -0,0 +1,251 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+#include
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+
+/* Performance counter blocks base offsets */
+#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MEMSYS_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define SC0_BASE_ONE_MEMSYS (3 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define SC0_BASE_TWO_MEMSYS (4 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+/* JM counter block offsets */
+#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 6)
+
+/* Tiler counter block offsets */
+#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
+
+/* MEMSYS counter block offsets */
+#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
+
+/* SC counter block offsets */
+#define SC_FRAG_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 4)
+#define SC_EXEC_CORE_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define SC_EXEC_INSTR_COUNT (KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define SC_TEX_COORD_ISSUE (KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define SC_VARY_SLOT_32 (KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define SC_VARY_SLOT_16 (KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define SC_BEATS_RD_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define SC_BEATS_WR_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define SC_BEATS_WR_TIB (KBASE_IPA_NR_BYTES_PER_CNT * 62)
+
+/** Maximum number of cores for which a single Memory System block of performance counters is present. */
+#define KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ((u8)4)
+
+
+/**
+ * get_jm_counter() - get performance counter offset inside the Job Manager block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the Job Manager block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ return JM_BASE + counter_block_offset;
+}
+
+/**
+ * get_memsys_counter() - get performance counter offset inside the Memory System block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Memory System block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ /* The base address of Memory System performance counters is always the same, although their number
+ * may vary based on the number of cores. For the moment it's ok to return a constant.
+ */
+ return MEMSYS_BASE + counter_block_offset;
+}
+
+/**
+ * get_sc_counter() - get performance counter offset inside the Shader Cores block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Shader Cores block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g71_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ const u32 sc_base = model_data->kbdev->gpu_props.num_cores <= KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ?
+ SC0_BASE_ONE_MEMSYS :
+ SC0_BASE_TWO_MEMSYS;
+
+ return sc_base + counter_block_offset;
+}
+
+/**
+ * memsys_single_counter() - calculate energy for a single Memory System performance counter.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to. + * + * Return: Energy estimation for a single Memory System performance counter. + */ +static s64 kbase_g71_memsys_single_counter( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, + u32 counter_block_offset) +{ + return kbase_ipa_single_counter(model_data, coeff, + kbase_g71_power_model_get_memsys_counter(model_data, counter_block_offset)); +} + +/** + * sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores. + * @model_data: pointer to GPU model data. + * @coeff: default value of coefficient for IPA group. + * @counter_block_offset: offset in bytes of the counter inside the block it belongs to. + * + * Return: Energy estimation for a Shader Cores performance counter for all cores. + */ +static s64 kbase_g71_sum_all_shader_cores( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, + u32 counter_block_offset) +{ + return kbase_ipa_sum_all_shader_cores(model_data, coeff, + kbase_g71_power_model_get_sc_counter(model_data, counter_block_offset)); +} + +/** + * jm_single_counter() - calculate energy for a single Job Manager performance counter. + * @model_data: pointer to GPU model data. + * @coeff: default value of coefficient for IPA group. + * @counter_block_offset: offset in bytes of the counter inside the block it belongs to. + * + * Return: Energy estimation for a single Job Manager performance counter. + */ +static s64 kbase_g71_jm_single_counter( + struct kbase_ipa_model_vinstr_data *model_data, + s32 coeff, + u32 counter_block_offset) +{ + return kbase_ipa_single_counter(model_data, coeff, + kbase_g71_power_model_get_jm_counter(model_data, counter_block_offset)); +} + +/** Table of IPA group definitions. + * + * For each IPA group, this table defines a function to access the given performance block counter (or counters, + * if the operation needs to be iterated on multiple blocks) and calculate energy estimation. 
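+ *
+ * The default_value coefficients below are fractions scaled by 1,000,000
+ * (see struct kbase_ipa_group), so e.g. 526300 corresponds to roughly 0.53.
+ * A negative coefficient, such as the one used for the tile write-back
+ * group, subtracts from the summed energy estimate.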
+ */ +static const struct kbase_ipa_group ipa_groups_def[] = { + { + .name = "l2_access", + .default_value = 526300, + .op = kbase_g71_memsys_single_counter, + .counter_block_offset = MEMSYS_L2_ANY_LOOKUP, + }, + { + .name = "exec_instr_count", + .default_value = 301100, + .op = kbase_g71_sum_all_shader_cores, + .counter_block_offset = SC_EXEC_INSTR_COUNT, + }, + { + .name = "tex_issue", + .default_value = 197400, + .op = kbase_g71_sum_all_shader_cores, + .counter_block_offset = SC_TEX_COORD_ISSUE, + }, + { + .name = "tile_wb", + .default_value = -156400, + .op = kbase_g71_sum_all_shader_cores, + .counter_block_offset = SC_BEATS_WR_TIB, + }, + { + .name = "gpu_active", + .default_value = 115800, + .op = kbase_g71_jm_single_counter, + .counter_block_offset = JM_GPU_ACTIVE, + }, +}; + +static int kbase_g71_power_model_init(struct kbase_ipa_model *model) +{ + int i, err = 0; + struct kbase_ipa_model_vinstr_data *model_data; + + model_data = kzalloc(sizeof(*model_data), GFP_KERNEL); + if (!model_data) + return -ENOMEM; + + model_data->kbdev = model->kbdev; + model_data->groups_def = ipa_groups_def; + BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def) > KBASE_IPA_MAX_GROUP_DEF_NUM); + model_data->groups_def_num = ARRAY_SIZE(ipa_groups_def); + + model->model_data = (void *) model_data; + + for (i = 0; i < ARRAY_SIZE(ipa_groups_def); ++i) { + const struct kbase_ipa_group *group = &ipa_groups_def[i]; + + model_data->group_values[i] = group->default_value; + err = kbase_ipa_model_add_param_s32(model, group->name, + &model_data->group_values[i], + 1, false); + if (err) + goto exit; + } + + model_data->scaling_factor = 5; + err = kbase_ipa_model_add_param_s32(model, "scale", + &model_data->scaling_factor, + 1, false); + if (err) + goto exit; + + err = kbase_ipa_attach_vinstr(model_data); + +exit: + if (err) { + kbase_ipa_model_param_free_all(model); + kfree(model_data); + } + return err; +} + +static void kbase_g71_power_model_term(struct kbase_ipa_model *model) +{ + struct kbase_ipa_model_vinstr_data *model_data = + (struct kbase_ipa_model_vinstr_data *)model->model_data; + + kbase_ipa_detach_vinstr(model_data); + kfree(model_data); +} + + +struct kbase_ipa_model_ops kbase_g71_ipa_model_ops = { + .name = "mali-g71-power-model", + .init = kbase_g71_power_model_init, + .term = kbase_g71_power_model_term, + .get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff, + .do_utilization_scaling_in_framework = false, +}; +KBASE_EXPORT_TEST_API(kbase_g71_ipa_model_ops); diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h new file mode 100644 index 00000000000000..c0774613b7a3e1 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h @@ -0,0 +1,432 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* AUTOMATICALLY GENERATED FILE. 
If you want to amend the issues/features, + * please update base/tools/hwconfig_generator/hwc_{issues,features}.py + * For more information see base/tools/hwconfig_generator/README + */ + +#ifndef _BASE_HWCONFIG_FEATURES_H_ +#define _BASE_HWCONFIG_FEATURES_H_ + +enum base_hw_feature { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4, + BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_V4, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_TLS_HASHING, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_generic[] = { + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t60x[] = { + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_V4, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t62x[] = { + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_V4, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t72x[] = { + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_V4, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t76x[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tFxx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + 
BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t83x[] = { + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_t82x[] = { + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tMIx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tHEx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + 
BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tSIx[] = { + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tDVx[] = { + BASE_HW_FEATURE_33BIT_VA, + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tNOx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_TLS_HASHING, + BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tGOx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + 
BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_THREAD_GROUP_SPLIT, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_TLS_HASHING, + BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tKAx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tTRx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_END +}; + +static const enum base_hw_feature base_hw_features_tBOx[] = { + BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION, + BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS, + BASE_HW_FEATURE_XAFFINITY, + BASE_HW_FEATURE_WARPING, + BASE_HW_FEATURE_INTERPIPE_REG_ALIASING, + BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS, + BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL, + BASE_HW_FEATURE_BRNDOUT_CC, + BASE_HW_FEATURE_BRNDOUT_KILL, + BASE_HW_FEATURE_LD_ST_LEA_TEX, + BASE_HW_FEATURE_LD_ST_TILEBUFFER, + BASE_HW_FEATURE_LINEAR_FILTER_FLOAT, + BASE_HW_FEATURE_MRT, + BASE_HW_FEATURE_MSAA_16X, + BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE, + BASE_HW_FEATURE_OUT_OF_ORDER_EXEC, + BASE_HW_FEATURE_T7XX_PAIRING_RULES, + BASE_HW_FEATURE_TEST4_DATUM_MODE, + BASE_HW_FEATURE_FLUSH_REDUCTION, + BASE_HW_FEATURE_PROTECTED_MODE, + BASE_HW_FEATURE_PROTECTED_DEBUG_MODE, + BASE_HW_FEATURE_COHERENCY_REG, + BASE_HW_FEATURE_AARCH64_MMU, + BASE_HW_FEATURE_END +}; + +#endif /* _BASE_HWCONFIG_FEATURES_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h new file mode 100644 index 00000000000000..58710f692a1c89 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h @@ -0,0 +1,1193 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. 
All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features, + * please update base/tools/hwconfig_generator/hwc_{issues,features}.py + * For more information see base/tools/hwconfig_generator/README + */ + +#ifndef _BASE_HWCONFIG_ISSUES_H_ +#define _BASE_HWCONFIG_ISSUES_H_ + +enum base_hw_issue { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_6367, + BASE_HW_ISSUE_6398, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_6787, + BASE_HW_ISSUE_7027, + BASE_HW_ISSUE_7144, + BASE_HW_ISSUE_7304, + BASE_HW_ISSUE_8073, + BASE_HW_ISSUE_8186, + BASE_HW_ISSUE_8215, + BASE_HW_ISSUE_8245, + BASE_HW_ISSUE_8250, + BASE_HW_ISSUE_8260, + BASE_HW_ISSUE_8280, + BASE_HW_ISSUE_8316, + BASE_HW_ISSUE_8381, + BASE_HW_ISSUE_8394, + BASE_HW_ISSUE_8401, + BASE_HW_ISSUE_8408, + BASE_HW_ISSUE_8443, + BASE_HW_ISSUE_8456, + BASE_HW_ISSUE_8564, + BASE_HW_ISSUE_8634, + BASE_HW_ISSUE_8778, + BASE_HW_ISSUE_8791, + BASE_HW_ISSUE_8833, + BASE_HW_ISSUE_8879, + BASE_HW_ISSUE_8896, + BASE_HW_ISSUE_8975, + BASE_HW_ISSUE_8986, + BASE_HW_ISSUE_8987, + BASE_HW_ISSUE_9010, + BASE_HW_ISSUE_9418, + BASE_HW_ISSUE_9423, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_9510, + BASE_HW_ISSUE_9566, + BASE_HW_ISSUE_9630, + BASE_HW_ISSUE_10127, + BASE_HW_ISSUE_10327, + BASE_HW_ISSUE_10410, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10487, + BASE_HW_ISSUE_10607, + BASE_HW_ISSUE_10632, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10676, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10797, + BASE_HW_ISSUE_10817, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10959, + BASE_HW_ISSUE_10969, + BASE_HW_ISSUE_10984, + BASE_HW_ISSUE_10995, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11035, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_7940, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TMIX_8138, + BASE_HW_ISSUE_TMIX_8206, + BASE_HW_ISSUE_TMIX_8343, + BASE_HW_ISSUE_TMIX_8463, + BASE_HW_ISSUE_TMIX_8456, + GPUCORE_1619, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_generic[] = { + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = { + BASE_HW_ISSUE_6367, + BASE_HW_ISSUE_6398, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_6787, + BASE_HW_ISSUE_7027, + BASE_HW_ISSUE_7144, + BASE_HW_ISSUE_7304, + BASE_HW_ISSUE_8073, + BASE_HW_ISSUE_8186, + BASE_HW_ISSUE_8215, + BASE_HW_ISSUE_8245, + BASE_HW_ISSUE_8250, + BASE_HW_ISSUE_8260, + BASE_HW_ISSUE_8280, + BASE_HW_ISSUE_8316, + BASE_HW_ISSUE_8381, + 
BASE_HW_ISSUE_8394, + BASE_HW_ISSUE_8401, + BASE_HW_ISSUE_8408, + BASE_HW_ISSUE_8443, + BASE_HW_ISSUE_8456, + BASE_HW_ISSUE_8564, + BASE_HW_ISSUE_8634, + BASE_HW_ISSUE_8778, + BASE_HW_ISSUE_8791, + BASE_HW_ISSUE_8833, + BASE_HW_ISSUE_8896, + BASE_HW_ISSUE_8975, + BASE_HW_ISSUE_8986, + BASE_HW_ISSUE_8987, + BASE_HW_ISSUE_9010, + BASE_HW_ISSUE_9418, + BASE_HW_ISSUE_9423, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_9510, + BASE_HW_ISSUE_9566, + BASE_HW_ISSUE_9630, + BASE_HW_ISSUE_10410, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10487, + BASE_HW_ISSUE_10607, + BASE_HW_ISSUE_10632, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10676, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10969, + BASE_HW_ISSUE_10984, + BASE_HW_ISSUE_10995, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11035, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_3964, + GPUCORE_1619, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = { + BASE_HW_ISSUE_6367, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_6787, + BASE_HW_ISSUE_7027, + BASE_HW_ISSUE_7304, + BASE_HW_ISSUE_8408, + BASE_HW_ISSUE_8564, + BASE_HW_ISSUE_8778, + BASE_HW_ISSUE_8975, + BASE_HW_ISSUE_9010, + BASE_HW_ISSUE_9418, + BASE_HW_ISSUE_9423, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_9510, + BASE_HW_ISSUE_10410, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10487, + BASE_HW_ISSUE_10607, + BASE_HW_ISSUE_10632, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10676, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10969, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11035, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = { + BASE_HW_ISSUE_6367, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_6787, + BASE_HW_ISSUE_7027, + BASE_HW_ISSUE_7304, + BASE_HW_ISSUE_8408, + BASE_HW_ISSUE_8564, + BASE_HW_ISSUE_8778, + BASE_HW_ISSUE_8975, + BASE_HW_ISSUE_9010, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_9510, + BASE_HW_ISSUE_10410, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10487, + BASE_HW_ISSUE_10607, + BASE_HW_ISSUE_10632, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10676, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11035, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10127, + BASE_HW_ISSUE_10327, + BASE_HW_ISSUE_10410, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10487, + BASE_HW_ISSUE_10607, + BASE_HW_ISSUE_10632, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10676, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10817, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10959, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11035, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + 
BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10959, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_10959, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + 
BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_26, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3542, + BASE_HW_ISSUE_T76X_3556, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10797, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10797, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = { + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10684, + BASE_HW_ISSUE_10797, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t72x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10471, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10797, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t76x[] 
= { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t60x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_8778, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t62x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_6402, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10472, + BASE_HW_ISSUE_10649, + BASE_HW_ISSUE_10931, + BASE_HW_ISSUE_11012, + BASE_HW_ISSUE_11020, + BASE_HW_ISSUE_11024, + BASE_HW_ISSUE_11042, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3964, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tFRx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3964, 
+ BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3966, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t86x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t83x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + GPUCORE_1619, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + 
BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3964, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1909, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10821, + BASE_HW_ISSUE_10883, + BASE_HW_ISSUE_10946, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T720_1386, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_T76X_3960, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_t82x[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11051, + BASE_HW_ISSUE_T76X_1963, + BASE_HW_ISSUE_T76X_3086, + BASE_HW_ISSUE_T76X_3700, + BASE_HW_ISSUE_T76X_3793, + BASE_HW_ISSUE_T76X_3979, + BASE_HW_ISSUE_TMIX_7891, + GPUCORE_1619, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_T76X_3953, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TMIX_8138, + BASE_HW_ISSUE_TMIX_8206, + BASE_HW_ISSUE_TMIX_8343, + BASE_HW_ISSUE_TMIX_8463, + BASE_HW_ISSUE_TMIX_8456, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_7940, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TMIX_8138, + BASE_HW_ISSUE_TMIX_8206, + BASE_HW_ISSUE_TMIX_8343, + BASE_HW_ISSUE_TMIX_8463, + BASE_HW_ISSUE_TMIX_8456, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_7940, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TMIX_8138, + BASE_HW_ISSUE_TMIX_8206, + BASE_HW_ISSUE_TMIX_8343, + BASE_HW_ISSUE_TMIX_8463, + BASE_HW_ISSUE_TMIX_8456, + BASE_HW_ISSUE_TMIX_8438, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tMIx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_7940, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TMIX_8138, + BASE_HW_ISSUE_TMIX_8206, + BASE_HW_ISSUE_TMIX_8343, + BASE_HW_ISSUE_TMIX_8456, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_END 
+}; + +static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_10682, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tHEx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_7891, + BASE_HW_ISSUE_TMIX_8042, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tSIx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_11054, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tDVx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tNOx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tGOx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tKAx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tKAx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tTRx[] = { + BASE_HW_ISSUE_5736, + 
BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_tBOx_r0p0[] = { + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +static const enum base_hw_issue base_hw_issues_model_tBOx[] = { + BASE_HW_ISSUE_5736, + BASE_HW_ISSUE_9435, + BASE_HW_ISSUE_TMIX_8133, + BASE_HW_ISSUE_TSIX_1116, + BASE_HW_ISSUE_END +}; + +#endif /* _BASE_HWCONFIG_ISSUES_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h new file mode 100644 index 00000000000000..2f3c65b32e232a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_base_kernel.h @@ -0,0 +1,1822 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base structures shared with the kernel. + */ + +#ifndef _BASE_KERNEL_H_ +#define _BASE_KERNEL_H_ + +/* Support UK10_2 IOCTLS */ +#define BASE_LEGACY_UK10_2_SUPPORT 1 + +/* Support UK10_4 IOCTLS */ +#define BASE_LEGACY_UK10_4_SUPPORT 1 + +typedef struct base_mem_handle { + struct { + u64 handle; + } basep; +} base_mem_handle; + +#include "mali_base_mem_priv.h" +#include "mali_kbase_profiling_gator_api.h" +#include "mali_midg_coherency.h" +#include "mali_kbase_gpu_id.h" + +/* + * Dependency stuff, keep it private for now. May want to expose it if + * we decide to make the number of semaphores a configurable + * option. + */ +#define BASE_JD_ATOM_COUNT 256 + +/* Set/reset values for a software event */ +#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1) +#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0) + +#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3 + +#define BASE_MAX_COHERENT_GROUPS 16 + +#if defined CDBG_ASSERT +#define LOCAL_ASSERT CDBG_ASSERT +#elif defined KBASE_DEBUG_ASSERT +#define LOCAL_ASSERT KBASE_DEBUG_ASSERT +#else +#error assert macro not defined! +#endif + +#if defined PAGE_MASK +#define LOCAL_PAGE_LSB ~PAGE_MASK +#else +#include + +#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2 +#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1) +#else +#error Failed to find page size +#endif +#endif + +/** + * @addtogroup base_user_api User-side Base APIs + * @{ + */ + +/** + * @addtogroup base_user_api_memory User-side Base Memory APIs + * @{ + */ + +/** + * typedef base_mem_alloc_flags - Memory allocation, access/hint flags. + * + * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator + * in order to determine the best cache policy. Some combinations are + * of course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD), + * which defines a write-only region on the CPU side, which is + * heavily read by the CPU... + * Other flags are only meaningful to a particular allocator. + * More flags can be added to this list, as long as they don't clash + * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit). + */ +typedef u32 base_mem_alloc_flags; + +/* Memory allocation, access/hint flags. + * + * See base_mem_alloc_flags. 
+ */ + +/* IN */ +/* Read access CPU side + */ +#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0) + +/* Write access CPU side + */ +#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1) + +/* Read access GPU side + */ +#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2) + +/* Write access GPU side + */ +#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3) + +/* Execute allowed on the GPU side + */ +#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4) + + /* BASE_MEM_HINT flags have been removed, but their values are reserved + * for backwards compatibility with older user-space drivers. The values + * can be re-used once support for r5p0 user-space drivers is removed, + * presumably in r7p0. + * + * RESERVED: (1U << 5) + * RESERVED: (1U << 6) + * RESERVED: (1U << 7) + * RESERVED: (1U << 8) + */ + +/* Grow backing store on GPU Page Fault + */ +#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) + +/* Page coherence Outer shareable, if available + */ +#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10) + +/* Page coherence Inner shareable + */ +#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11) + +/* Should be cached on the CPU + */ +#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12) + +/* IN/OUT */ +/* Must have same VA on both the GPU and the CPU + */ +#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13) + +/* OUT */ +/* Must call mmap to acquire a GPU address for the alloc + */ +#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14) + +/* IN */ +/* Page coherence Outer shareable, required. + */ +#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15) + +/* Secure memory + */ +#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16) + +/* Not needed physical memory + */ +#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17) + +/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the + * addresses to be the same + */ +#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18) + +/** + * Bit 19 is reserved. + * + * Do not remove, use the next unreserved bit for new flags + **/ +#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19) + +/* Number of bits used as flags for base memory management + * + * Must be kept in sync with the base_mem_alloc_flags flags + */ +#define BASE_MEM_FLAGS_NR_BITS 20 + +/* A mask for all output bits, excluding IN/OUT bits. + */ +#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP + +/* A mask for all input bits, including IN/OUT bits. + */ +#define BASE_MEM_FLAGS_INPUT_MASK \ + (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK) + +/* A mask for all the flags which are modifiable via the base_mem_set_flags + * interface. + */ +#define BASE_MEM_FLAGS_MODIFIABLE \ + (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \ + BASE_MEM_COHERENT_LOCAL) + +/** + * enum base_mem_import_type - Memory types supported by @a base_mem_import + * + * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type + * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id. + * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int) + * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a + * base_mem_import_user_buffer + * + * Each type defines what the supported handle type is. + * + * If any new type is added here ARM must be contacted + * to allocate a numeric value for it. 
+ * Do not just add a new type without synchronizing with ARM
+ * as future releases from ARM might include other new types
+ * which could clash with your custom types.
+ */
+typedef enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ BASE_MEM_IMPORT_TYPE_UMP = 1,
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+} base_mem_import_type;
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: address of imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ u64 ptr;
+ u64 length;
+};
+
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
+
+/**
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* reserved handles ..-64< for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB 0xfffff000UL
+
+
+/* Bit mask of cookies used for memory allocation setup */
+#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
+
+
+/**
+ * @brief Result codes of changing the size of the backing store allocated to a tmem region
+ */
+typedef enum base_backing_threshold_status {
+ BASE_BACKING_THRESHOLD_OK = 0, /**< Resize successful */
+ BASE_BACKING_THRESHOLD_ERROR_OOM = -2, /**< Increase failed due to an out-of-memory condition */
+ BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */
+} base_backing_threshold_status;
+
+/**
+ * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
+ * @{
+ */
+
+/**
+ * @brief a basic memory operation (sync-set).
+ *
+ * The content of this structure is private, and should only be used
+ * by the accessors.
+ */
+typedef struct base_syncset {
+ struct basep_syncset basep_sset;
+} base_syncset;
+
+/** @} end group base_user_api_memory_defered */
+
+/**
+ * Handle to represent imported memory object.
+ * Simple opaque handle to imported memory, can't be used
+ * with anything but base_external_resource_init to bind to an atom.
+ */ +typedef struct base_import_handle { + struct { + u64 handle; + } basep; +} base_import_handle; + +/** @} end group base_user_api_memory */ + +/** + * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs + * @{ + */ + +typedef int platform_fence_type; +#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1) + +/** + * Base stream handle. + * + * References an underlying base stream object. + */ +typedef struct base_stream { + struct { + int fd; + } basep; +} base_stream; + +/** + * Base fence handle. + * + * References an underlying base fence object. + */ +typedef struct base_fence { + struct { + int fd; + int stream_fd; + } basep; +} base_fence; + +/** + * @brief Per-job data + * + * This structure is used to store per-job data, and is completely unused + * by the Base driver. It can be used to store things such as callback + * function pointer, data to handle job completion. It is guaranteed to be + * untouched by the Base driver. + */ +typedef struct base_jd_udata { + u64 blob[2]; /**< per-job data array */ +} base_jd_udata; + +/** + * @brief Memory aliasing info + * + * Describes a memory handle to be aliased. + * A subset of the handle can be chosen for aliasing, given an offset and a + * length. + * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a + * region where a special page is mapped with a write-alloc cache setup, + * typically used when the write result of the GPU isn't needed, but the GPU + * must write anyway. + * + * Offset and length are specified in pages. + * Offset must be within the size of the handle. + * Offset+length must not overrun the size of the handle. + * + * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * @offset Offset within the handle to start aliasing from, in pages. + * Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE. + * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE + * specifies the number of times the special page is needed. + */ +struct base_mem_aliasing_info { + base_mem_handle handle; + u64 offset; + u64 length; +}; + +/** + * struct base_jit_alloc_info - Structure which describes a JIT allocation + * request. + * @gpu_alloc_addr: The GPU virtual address to write the JIT + * allocated GPU virtual address to. + * @va_pages: The minimum number of virtual pages required. + * @commit_pages: The minimum number of physical pages which + * should back the allocation. + * @extent: Granularity of physical pages to grow the + * allocation by during a fault. + * @id: Unique ID provided by the caller, this is used + * to pair allocation and free requests. + * Zero is not a valid value. + */ +struct base_jit_alloc_info { + u64 gpu_alloc_addr; + u64 va_pages; + u64 commit_pages; + u64 extent; + u8 id; +}; + +/** + * @brief Job dependency type. + * + * A flags field will be inserted into the atom structure to specify whether a dependency is a data or + * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to add without + * changing the structure size). + * When the flag is set for a particular dependency to signal that it is an ordering only dependency then + * errors will not be propagated. + */ +typedef u8 base_jd_dep_type; + + +#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */ +#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */ +#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */ + +/** + * @brief Job chain hardware requirements. 
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. Not specifying the
+ * correct settings can/will cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * Special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies, and that doesn't execute anything on the hardware.
+ */
+typedef u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/**
+ * No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/**
+ * Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/**
+ * Requires compute shaders
+ * This covers any of the following Midgard Job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2) /**< Requires tiling */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) /**< Requires cache flushes */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4) /**< Requires value writeback */
+
+/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/**
+ * SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/**
+ * SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/**
+ * SW Only requirement: External resources are referenced by this atom.
+ * When external resources are referenced no syncsets can be bundled with the atom
+ * but should instead be part of NULL jobs inserted into the dependency tree.
+ * The first pre_dep object must be configured for the external resources to use,
+ * the second pre_dep object can be used to create other dependencies.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/**
+ * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
+ * to the hardware but will cause some action to happen within the driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/**
+ * SW Only requirement: Replay job.
+ * + * If the preceding job fails, the replay job will cause the jobs specified in + * the list of base_jd_replay_payload pointed to by the jc pointer to be + * replayed. + * + * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT + * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay + * job is failed, as well as any following dependencies. + * + * The replayed jobs will require a number of atom IDs. If there are not enough + * free atom IDs then the replay job will fail. + * + * If the preceding job does not fail, then the replay job is returned as + * completed. + * + * The replayed jobs will never be returned to userspace. The preceding failed + * job will be returned to userspace as failed; the status of this job should + * be ignored. Completion should be determined by the status of the replay soft + * job. + * + * In order for the jobs to be replayed, the job headers will have to be + * modified. The Status field will be reset to NOT_STARTED. If the Job Type + * field indicates a Vertex Shader Job then it will be changed to Null Job. + * + * The replayed jobs have the following assumptions : + * + * - No external resources. Any required external resources will be held by the + * replay atom. + * - Pre-dependencies are created based on job order. + * - Atom numbers are automatically assigned. + * - device_nr is set to 0. This is not relevant as + * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set. + * - Priority is inherited from the replay job. + */ +#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4) +/** + * SW only requirement: event wait/trigger job. + * + * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set. + * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the + * other waiting jobs. It completes immediately. + * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it + * possible for other jobs to wait upon. It completes immediately. + */ +#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5) +#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6) +#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7) + +#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8) + +/** + * SW only requirement: Just In Time allocation + * + * This job requests a JIT allocation based on the request in the + * @base_jit_alloc_info structure which is passed via the jc element of + * the atom. + * + * It should be noted that the id entry in @base_jit_alloc_info must not + * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE. + * + * Should this soft job fail it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE + * soft job to free the JIT allocation is still made. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9) +/** + * SW only requirement: Just In Time free + * + * This job requests a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC + * to be freed. The ID of the JIT allocation is passed via the jc element of + * the atom. + * + * The job will complete immediately. + */ +#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa) + +/** + * SW only requirement: Map external resource + * + * This job requests external resource(s) are mapped once the dependencies + * of the job have been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * @base_external_resource_list. 
+ */ +#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb) +/** + * SW only requirement: Unmap external resource + * + * This job requests external resource(s) are unmapped once the dependencies + * of the job has been satisfied. The list of external resources are + * passed via the jc element of the atom which is a pointer to a + * @base_external_resource_list. + */ +#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc) + +/** + * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders) + * + * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type. + * + * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job + * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs. + */ +#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10) + +/** + * HW Requirement: Use the base_jd_atom::device_nr field to specify a + * particular core group + * + * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority + * + * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms. + * + * If the core availability policy is keeping the required core group turned off, then + * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code. + */ +#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11) + +/** + * SW Flag: If this bit is set then the successful completion of this atom + * will not cause an event to be sent to userspace + */ +#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12) + +/** + * SW Flag: If this bit is set then completion of this atom will not cause an + * event to be sent to userspace, whether successful or not. + */ +#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14) + +/** + * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job starts which does not have this bit set or a job completes + * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if + * the CPU may have written to memory addressed by the job since the last job + * without this bit set was submitted. + */ +#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15) + +/** + * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes. + * + * If this bit is set then the GPU's cache will not be cleaned and invalidated + * until a GPU job completes which does not have this bit set or a job starts + * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bti set. Do not use if + * the CPU may read from or partially overwrite memory addressed by the job + * before the next job without this bit set completes. + */ +#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16) + +/** + * These requirement bits are currently unused in base_jd_core_req + */ +#define BASEP_JD_REQ_RESERVED \ + (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \ + BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \ + BASE_JD_REQ_EVENT_COALESCE | \ + BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \ + BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \ + BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END)) + +/** + * Mask of all bits in base_jd_core_req that control the type of the atom. 
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/*
+ * Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+ ((core_req & BASE_JD_REQ_SOFT_JOB) || \
+ (core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+
+/**
+ * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ * finish. These are run preferentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+enum kbase_atom_coreref_state {
+ /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+ KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+ /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+ /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+ /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+ /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+ KBASE_ATOM_COREREF_STATE_READY
+};
+
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling between atoms of the same type within
+ * a base context, and only after the atoms have had dependencies resolved.
+ * Fragment atoms do not affect non-fragment atoms with lower priorities, and
+ * the other way around. For example, a low priority atom that has had its
+ * dependencies resolved might run before a higher priority atom that has not
+ * had its dependencies resolved.
+ *
+ * The scheduling between base contexts/processes and between atoms from
+ * different base contexts/processes is unaffected by atom priority.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot that have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+ /** Atom is not used */
+ KBASE_JD_ATOM_STATE_UNUSED,
+ /** Atom is queued in JD */
+ KBASE_JD_ATOM_STATE_QUEUED,
+ /** Atom has been given to JS (is runnable/running) */
+ KBASE_JD_ATOM_STATE_IN_JS,
+ /** Atom has been completed, but not yet handed back to job dispatcher
+ * for dependency resolution */
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ /** Atom has been completed, but not yet handed back to userspace */
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+
+struct base_dependency {
+ base_atom_id atom_id; /**< An atom number */
+ base_jd_dep_type dependency_type; /**< Dependency type */
+};
+
+/* This structure has changed since UK 10.2 for which base_jd_core_req was a u16 value.
+ * In order to keep the size of the structure the same, the padding field has been adjusted
+ * accordingly and a core_req field of u32 type (to which UK 10.3 defines base_jd_core_req)
+ * is added at the end of the structure. The place in the structure previously occupied by the u16 core_req
+ * is kept but renamed to compat_core_req, and as such it can be used in the ioctl call for job submission
+ * as long as UK 10.2 legacy is supported. Once this support ends, this field can be left
+ * for possible future use. */
+typedef struct base_jd_atom_v2 {
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
+ u64 extres_list; /**< list of external resources */
+ u16 nr_extres; /**< nr of external resources */
+ u16 compat_core_req; /**< core requirements which correspond to the legacy support for UK 10.2 */
+ struct base_dependency pre_dep[2]; /**< pre-dependencies, one needs to use the SETTER function to assign this field,
+ this is done in order to reduce the possibility of improper assignment of a dependency field */
+ base_atom_id atom_number; /**< unique number to identify the atom */
+ base_jd_prio prio; /**< Atom priority.
Refer to @ref base_jd_prio for more details */ + u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */ + u8 padding[1]; + base_jd_core_req core_req; /**< core requirements */ +} base_jd_atom_v2; + +typedef enum base_external_resource_access { + BASE_EXT_RES_ACCESS_SHARED, + BASE_EXT_RES_ACCESS_EXCLUSIVE +} base_external_resource_access; + +typedef struct base_external_resource { + u64 ext_resource; +} base_external_resource; + + +/** + * The maximum number of external resources which can be mapped/unmapped + * in a single request. + */ +#define BASE_EXT_RES_COUNT_MAX 10 + +/** + * struct base_external_resource_list - Structure which describes a list of + * external resources. + * @count: The number of resources. + * @ext_res: Array of external resources which is + * sized at allocation time. + */ +struct base_external_resource_list { + u64 count; + struct base_external_resource ext_res[1]; +}; + +struct base_jd_debug_copy_buffer { + u64 address; + u64 size; + struct base_external_resource extres; +}; + +/** + * @brief Setter for a dependency structure + * + * @param[in] dep The kbase jd atom dependency to be initialized. + * @param id The atom_id to be assigned. + * @param dep_type The dep_type to be assigned. + * + */ +static inline void base_jd_atom_dep_set(struct base_dependency *dep, + base_atom_id id, base_jd_dep_type dep_type) +{ + LOCAL_ASSERT(dep != NULL); + + /* + * make sure we don't set not allowed combinations + * of atom_id/dependency_type. + */ + LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) || + (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID)); + + dep->atom_id = id; + dep->dependency_type = dep_type; +} + +/** + * @brief Make a copy of a dependency structure + * + * @param[in,out] dep The kbase jd atom dependency to be written. + * @param[in] from The dependency to make a copy from. + * + */ +static inline void base_jd_atom_dep_copy(struct base_dependency *dep, + const struct base_dependency *from) +{ + LOCAL_ASSERT(dep != NULL); + + base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type); +} + +/** + * @brief Soft-atom fence trigger setup. + * + * Sets up an atom to be a SW-only atom signaling a fence + * when it reaches the run state. + * + * Using the existing base dependency system the fence can + * be set to trigger when a GPU job has finished. + * + * The base fence object must not be terminated until the atom + * has been submitted to @ref base_jd_submit and @ref base_jd_submit + * has returned. + * + * @a fence must be a valid fence set up with @a base_fence_init. + * Calling this function with a uninitialized fence results in undefined behavior. + * + * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom + * @param[in] fence The base fence object to trigger. + * + * @pre @p fence must reference a @ref base_fence successfully initialized by + * calling @ref base_fence_init. + * @pre @p fence was @e not initialized by calling @ref base_fence_import, nor + * is it associated with a fence-trigger job that was already submitted + * by calling @ref base_jd_submit. + * @post @p atom can be submitted by calling @ref base_jd_submit. 
+ */ +static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence) +{ + LOCAL_ASSERT(atom); + LOCAL_ASSERT(fence); + LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE); + LOCAL_ASSERT(fence->basep.stream_fd >= 0); + atom->jc = (uintptr_t) fence; + atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER; +} + +/** + * @brief Soft-atom fence wait setup. + * + * Sets up an atom to be a SW-only atom waiting on a fence. + * When the fence becomes triggered the atom becomes runnable + * and completes immediately. + * + * Using the existing base dependency system the fence can + * be set to block a GPU job until it has been triggered. + * + * The base fence object must not be terminated until the atom + * has been submitted to @ref base_jd_submit and + * @ref base_jd_submit has returned. + * + * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom + * @param[in] fence The base fence object to wait on + * + * @pre @p fence must reference a @ref base_fence successfully initialized by + * calling @ref base_fence_import, or it must be associated with a + * fence-trigger job that was already submitted by calling + * @ref base_jd_submit. + * @post @p atom can be submitted by calling @ref base_jd_submit. + */ +static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence) +{ + LOCAL_ASSERT(atom); + LOCAL_ASSERT(fence); + LOCAL_ASSERT(fence->basep.fd >= 0); + atom->jc = (uintptr_t) fence; + atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT; +} + +/** + * @brief External resource info initialization. + * + * Sets up an external resource object to reference + * a memory allocation and the type of access requested. + * + * @param[in] res The resource object to initialize + * @param handle The handle to the imported memory object, must be + * obtained by calling @ref base_mem_as_import_handle(). + * @param access The type of access requested + */ +static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access) +{ + u64 address; + + address = handle.basep.handle; + + LOCAL_ASSERT(res != NULL); + LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB)); + LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE); + + res->ext_resource = address | (access & LOCAL_PAGE_LSB); +} + +/** + * @brief Job chain event code bits + * Defines the bits used to create ::base_jd_event_code + */ +enum { + BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */ + BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */ + BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event idicates success (SW events only) */ + BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */ + BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */ + BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */ + BASE_JD_SW_EVENT_RESERVED = (3u << 11), /**< Reserved event type */ + BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) /**< Mask to extract the type from an event code */ +}; + +/** + * @brief Job chain event codes + * + * HW and low-level SW events are represented by event codes. + * The status of jobs which succeeded are also represented by + * an event code (see ::BASE_JD_EVENT_DONE). + * Events are usually reported as part of a ::base_jd_event. 
+ * + * The event codes are encoded in the following way: + * @li 10:0 - subtype + * @li 12:11 - type + * @li 13 - SW success (only valid if the SW bit is set) + * @li 14 - SW event (HW event if not set) + * @li 15 - Kernel event (should never be seen in userspace) + * + * Events are split up into ranges as follows: + * - BASE_JD_EVENT_RANGE_\_START + * - BASE_JD_EVENT_RANGE_\_END + * + * \a code is in \'s range when: + * - BASE_JD_EVENT_RANGE_\_START <= code < BASE_JD_EVENT_RANGE_\_END + * + * Ranges can be asserted for adjacency by testing that the END of the previous + * is equal to the START of the next. This is useful for optimizing some tests + * for range. + * + * A limitation is that the last member of this enum must explicitly be handled + * (with an assert-unreachable statement) in switch statements that use + * variables of this type. Otherwise, the compiler warns that we have not + * handled that enum value. + */ +typedef enum base_jd_event_code { + /* HW defined exceptions */ + + /** Start of HW Non-fault status codes + * + * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault, + * because the job was hard-stopped + */ + BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0, + + /* non-fatal exceptions */ + BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */ + BASE_JD_EVENT_DONE = 0x01, + BASE_JD_EVENT_STOPPED = 0x03, /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */ + BASE_JD_EVENT_TERMINATED = 0x04, /**< This is actually a fault status code - the job was hard stopped */ + BASE_JD_EVENT_ACTIVE = 0x08, /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */ + + /** End of HW Non-fault status codes + * + * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault, + * because the job was hard-stopped + */ + BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40, + + /** Start of HW fault and SW Error status codes */ + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40, + + /* job exceptions */ + BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40, + BASE_JD_EVENT_JOB_POWER_FAULT = 0x41, + BASE_JD_EVENT_JOB_READ_FAULT = 0x42, + BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43, + BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44, + BASE_JD_EVENT_JOB_BUS_FAULT = 0x48, + BASE_JD_EVENT_INSTR_INVALID_PC = 0x50, + BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51, + BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52, + BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53, + BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54, + BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55, + BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56, + BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58, + BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59, + BASE_JD_EVENT_STATE_FAULT = 0x5A, + BASE_JD_EVENT_OUT_OF_MEMORY = 0x60, + BASE_JD_EVENT_UNKNOWN = 0x7F, + + /* GPU exceptions */ + BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80, + BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88, + + /* MMU exceptions */ + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3, + BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4, + BASE_JD_EVENT_PERMISSION_FAULT = 0xC8, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3, + BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4, + BASE_JD_EVENT_ACCESS_FLAG = 0xD8, + + /* SW defined exceptions */ + BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | 
BASE_JD_SW_EVENT_JOB | 0x001, + BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002, + BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003, + BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004, + BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005, + + BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003, + + /** End of HW fault and SW Error status codes */ + BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + /** Start of SW Success status codes */ + BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000, + + BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000, + BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000, + BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000, + + /** End of SW Success status codes */ + BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF, + + /** Start of Kernel-only status codes. Such codes are never returned to user-space */ + BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000, + BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000, + + /** End of Kernel-only status codes. */ + BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF +} base_jd_event_code; + +/** + * @brief Event reporting structure + * + * This structure is used by the kernel driver to report information + * about GPU events. The can either be HW-specific events or low-level + * SW events, such as job-chain completion. + * + * The event code contains an event type field which can be extracted + * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK. + * + * Based on the event type base_jd_event::data holds: + * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed + * job-chain + * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has + * been completed (ie all contained job-chains have been completed). + * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used + */ +typedef struct base_jd_event_v2 { + base_jd_event_code event_code; /**< event code */ + base_atom_id atom_number; /**< the atom number that has completed */ + struct base_jd_udata udata; /**< user data */ +} base_jd_event_v2; + +/** + * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs. + * + * This structure is stored into the memory pointed to by the @c jc field + * of @ref base_jd_atom. + * + * It must not occupy the same CPU cache line(s) as any neighboring data. + * This is to avoid cases where access to pages containing the structure + * is shared between cached and un-cached memory regions, which would + * cause memory corruption. 
+ */ + +typedef struct base_dump_cpu_gpu_counters { + u64 system_time; + u64 cycle_counter; + u64 sec; + u32 usec; + u8 padding[36]; +} base_dump_cpu_gpu_counters; + +/** @} end group base_user_api_job_dispatch */ + +#define GPU_MAX_JOB_SLOTS 16 + +/** + * @page page_base_user_api_gpuprops User-side Base GPU Property Query API + * + * The User-side Base GPU Property Query API encapsulates two + * sub-modules: + * + * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties" + * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties" + * + * There is a related third module outside of Base, which is owned by the MIDG + * module: + * - @ref gpu_props_static "Midgard Compile-time GPU Properties" + * + * Base only deals with properties that vary between different Midgard + * implementations - the Dynamic GPU properties and the Platform Config + * properties. + * + * For properties that are constant for the Midgard Architecture, refer to the + * MIDG module. However, we will discuss their relevance here just to + * provide background information. + * + * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules + * + * The compile-time properties (Platform Config, Midgard Compile-time + * properties) are exposed as pre-processor macros. + * + * Complementing the compile-time properties are the Dynamic GPU + * Properties, which act as a conduit for the Midgard Configuration + * Discovery. + * + * In general, the dynamic properties are present to verify that the platform + * has been configured correctly with the right set of Platform Config + * Compile-time Properties. + * + * As a consistent guide across the entire DDK, the choice for dynamic or + * compile-time should consider the following, in order: + * -# Can the code be written so that it doesn't need to know the + * implementation limits at all? + * -# If you need the limits, get the information from the Dynamic Property + * lookup. This should be done once as you fetch the context, and then cached + * as part of the context data structure, so it's cheap to access. + * -# If there's a clear and arguable inefficiency in using Dynamic Properties, + * then use a Compile-Time Property (Platform Config, or Midgard Compile-time + * property). Examples of where this might be sensible follow: + * - Part of a critical inner-loop + * - Frequent re-use throughout the driver, causing significant extra load + * instructions or control flow that would be worthwhile optimizing out. + * + * We cannot provide an exhaustive set of examples, neither can we provide a + * rule for every possible situation. Use common sense, and think about: what + * the rest of the driver will be doing; how the compiler might represent the + * value if it is a compile-time constant; whether an OEM shipping multiple + * devices would benefit much more from a single DDK binary, instead of + * insignificant micro-optimizations. + * + * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties + * + * Dynamic GPU properties are presented in two sets: + * -# the commonly used properties in @ref base_gpu_props, which have been + * unpacked from GPU register bitfields. + * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props + * (also a member of @ref base_gpu_props). All of these are presented in + * the packed form, as presented by the GPU registers themselves. + * + * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to + * allow a user of the Mali Tools (e.g. 
PAT) to determine "Why is this device + * behaving differently?". In this case, all information about the + * configuration is potentially useful, but it does not need to be processed + * by the driver. Instead, the raw registers can be processed by the Mali + * Tools software on the host PC. + * + * The properties returned extend the Midgard Configuration Discovery + * registers. For example, GPU clock speed is not specified in the Midgard + * Architecture, but is necessary for OpenCL's clGetDeviceInfo() function. + * + * The GPU properties are obtained by a call to + * _mali_base_get_gpu_props(). This simply returns a pointer to a const + * base_gpu_props structure. It is constant for the life of a base + * context. Multiple calls to _mali_base_get_gpu_props() to a base context + * return the same pointer to a constant structure. This avoids cache pollution + * of the common data. + * + * This pointer must not be freed, because it does not point to the start of a + * region allocated by the memory allocator; instead, just close the @ref + * base_context. + * + * + * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties + * + * The Platform Config File sets up gpu properties that are specific to a + * certain platform. Properties that are 'Implementation Defined' in the + * Midgard Architecture spec are placed here. + * + * @note Reference configurations are provided for Midgard Implementations, such as + * the Mali-T600 family. The customer need not repeat this information, and can select one of + * these reference configurations. For example, VA_BITS, PA_BITS and the + * maximum number of samples per pixel might vary between Midgard Implementations, but + * \b not for platforms using the Mali-T604. This information is placed in + * the reference configuration files. + * + * The System Integrator creates the following structure: + * - platform_XYZ + * - platform_XYZ/plat + * - platform_XYZ/plat/plat_config.h + * + * They then edit plat_config.h, using the example plat_config.h files as a + * guide. + * + * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will + * receive a helpful \#error message if they do not do this correctly. This + * selects the Reference Configuration for the Midgard Implementation. The rationale + * behind this decision (against asking the customer to write \#include + * in their plat_config.h) is as follows: + * - This mechanism 'looks' like a regular config file (such as Linux's + * .config) + * - It is difficult to get wrong in a way that will produce strange build + * errors: + * - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and + * so they won't accidentally pick another file with 'mali_t600' in its name + * - When the build doesn't work, the System Integrator may think the DDK is + * doesn't work, and attempt to fix it themselves: + * - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the + * error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells + * you. + * - For a \#include mechanism, checks must still be made elsewhere, which the + * System Integrator may try working around by setting \#defines (such as + * VA_BITS) themselves in their plat_config.h. In the worst case, they may + * set the prevention-mechanism \#define of + * "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN". + * - In this case, they would believe they are on the right track, because + * the build progresses with their fix, but with errors elsewhere. 
+ * + * However, there is nothing to prevent the customer using \#include to organize + * their own configurations files hierarchically. + * + * The mechanism for the header file processing is as follows: + * + * @dot + digraph plat_config_mechanism { + rankdir=BT + size="6,6" + + "mali_base.h"; + "gpu/mali_gpu.h"; + + node [ shape=box ]; + { + rank = same; ordering = out; + + "gpu/mali_gpu_props.h"; + "base/midg_gpus/mali_t600.h"; + "base/midg_gpus/other_midg_gpu.h"; + } + { rank = same; "plat/plat_config.h"; } + { + rank = same; + "gpu/mali_gpu.h" [ shape=box ]; + gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ]; + select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ; + } + node [ shape=box ]; + { rank = same; "plat/plat_config.h"; } + { rank = same; "mali_base.h"; } + + "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h"; + "mali_base.h" -> "plat/plat_config.h" ; + "mali_base.h" -> select_gpu ; + + "plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ; + gpu_chooser -> select_gpu [style="dotted,bold"] ; + + select_gpu -> "base/midg_gpus/mali_t600.h" ; + select_gpu -> "base/midg_gpus/other_midg_gpu.h" ; + } + @enddot + * + * + * @section sec_base_user_api_gpuprops_kernel Kernel Operation + * + * During Base Context Create time, user-side makes a single kernel call: + * - A call to fill user memory with GPU information structures + * + * The kernel-side will fill the provided the entire processed @ref base_gpu_props + * structure, because this information is required in both + * user and kernel side; it does not make sense to decode it twice. + * + * Coherency groups must be derived from the bitmasks, but this can be done + * kernel side, and just once at kernel startup: Coherency groups must already + * be known kernel-side, to support chains that specify a 'Only Coherent Group' + * SW requirement, or 'Only Coherent Group with Tiler' SW requirement. + * + * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation + * Creation of the coherent group data is done at device-driver startup, and so + * is one-time. This will most likely involve a loop with CLZ, shifting, and + * bit clearing on the L2_PRESENT mask, depending on whether the + * system is L2 Coherent. The number of shader cores is done by a + * population count, since faulty cores may be disabled during production, + * producing a non-contiguous mask. + * + * The memory requirements for this algorithm can be determined either by a u64 + * population count on the L2_PRESENT mask (a LUT helper already is + * required for the above), or simple assumption that there can be no more than + * 16 coherent groups, since core groups are typically 4 cores. + */ + +/** + * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs + * @{ + */ + +/** + * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties + * @{ + */ + +#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3 + +#define BASE_MAX_COHERENT_GROUPS 16 + +struct mali_base_gpu_core_props { + /** + * Product specific value. + */ + u32 product_id; + + /** + * Status of the GPU release. + * No defined values, but starts at 0 and increases by one for each + * release status (alpha, beta, EAC, etc.). + * 4 bit values (0-15). + */ + u16 version_status; + + /** + * Minor release number of the GPU. "P" part of an "RnPn" release number. + * 8 bit values (0-255). 
+ */ + u16 minor_revision; + + /** + * Major release number of the GPU. "R" part of an "RnPn" release number. + * 4 bit values (0-15). + */ + u16 major_revision; + + u16 padding; + + /** + * This property is deprecated since it has not contained the real current + * value of GPU clock speed. It is kept here only for backwards compatibility. + * For the new ioctl interface, it is ignored and is treated as a padding + * to keep the structure of the same size and retain the placement of its + * members. + */ + u32 gpu_speed_mhz; + + /** + * @usecase GPU clock max/min speed is required for computing best/worst case + * in tasks as job scheduling ant irq_throttling. (It is not specified in the + * Midgard Architecture). + * Also, GPU clock max speed is used for OpenCL's clGetDeviceInfo() function. + */ + u32 gpu_freq_khz_max; + u32 gpu_freq_khz_min; + + /** + * Size of the shader program counter, in bits. + */ + u32 log2_program_counter_size; + + /** + * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a + * bitpattern where a set bit indicates that the format is supported. + * + * Before using a texture format, it is recommended that the corresponding + * bit be checked. + */ + u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; + + /** + * Theoretical maximum memory available to the GPU. It is unlikely that a + * client will be able to allocate all of this memory for their own + * purposes, but this at least provides an upper bound on the memory + * available to the GPU. + * + * This is required for OpenCL's clGetDeviceInfo() call when + * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The + * client will not be expecting to allocate anywhere near this value. + */ + u64 gpu_available_memory_size; +}; + +/** + * + * More information is possible - but associativity and bus width are not + * required by upper-level apis. + */ +struct mali_base_gpu_l2_cache_props { + u8 log2_line_size; + u8 log2_cache_size; + u8 num_l2_slices; /* Number of L2C slices. 1 or higher */ + u8 padding[5]; +}; + +struct mali_base_gpu_tiler_props { + u32 bin_size_bytes; /* Max is 4*2^15 */ + u32 max_active_levels; /* Max is 2^15 */ +}; + +/** + * GPU threading system details. + */ +struct mali_base_gpu_thread_props { + u32 max_threads; /* Max. number of threads per core */ + u32 max_workgroup_size; /* Max. number of threads per workgroup */ + u32 max_barrier_size; /* Max. number of threads that can synchronize on a simple barrier */ + u16 max_registers; /* Total size [1..65535] of the register file available per core. */ + u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */ + u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */ + u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */ + u8 padding[7]; +}; + +/** + * @brief descriptor for a coherent group + * + * \c core_mask exposes all cores in that coherent group, and \c num_cores + * provides a cached population-count for that mask. + * + * @note Whilst all cores are exposed in the mask, not all may be available to + * the application, depending on the Kernel Power policy. + * + * @note if u64s must be 8-byte aligned, then this structure has 32-bits of wastage. 
+ */ +struct mali_base_gpu_coherent_group { + u64 core_mask; /**< Core restriction mask required for the group */ + u16 num_cores; /**< Number of cores in the group */ + u16 padding[3]; +}; + +/** + * @brief Coherency group information + * + * Note that the sizes of the members could be reduced. However, the \c group + * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte + * aligned, thus leading to wastage if the other members sizes were reduced. + * + * The groups are sorted by core mask. The core masks are non-repeating and do + * not intersect. + */ +struct mali_base_gpu_coherent_group_info { + u32 num_groups; + + /** + * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches. + * + * The GPU Counter dumping writes 2048 bytes per core group, regardless of + * whether the core groups are coherent or not. Hence this member is needed + * to calculate how much memory is required for dumping. + * + * @note Do not use it to work out how many valid elements are in the + * group[] member. Use num_groups instead. + */ + u32 num_core_groups; + + /** + * Coherency features of the memory, accessed by @ref gpu_mem_features + * methods + */ + u32 coherency; + + u32 padding; + + /** + * Descriptors of coherent groups + */ + struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS]; +}; + +/** + * A complete description of the GPU's Hardware Configuration Discovery + * registers. + * + * The information is presented inefficiently for access. For frequent access, + * the values should be better expressed in an unpacked form in the + * base_gpu_props structure. + * + * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to + * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device + * behaving differently?". In this case, all information about the + * configuration is potentially useful, but it does not need to be processed + * by the driver. Instead, the raw registers can be processed by the Mali + * Tools software on the host PC. + * + */ +struct gpu_raw_gpu_props { + u64 shader_present; + u64 tiler_present; + u64 l2_present; + u64 stack_present; + + u32 l2_features; + u32 suspend_size; /* API 8.2+ */ + u32 mem_features; + u32 mmu_features; + + u32 as_present; + + u32 js_present; + u32 js_features[GPU_MAX_JOB_SLOTS]; + u32 tiler_features; + u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; + + u32 gpu_id; + + u32 thread_max_threads; + u32 thread_max_workgroup_size; + u32 thread_max_barrier_size; + u32 thread_features; + + /* + * Note: This is the _selected_ coherency mode rather than the + * available modes as exposed in the coherency_features register. + */ + u32 coherency_mode; +}; + +/** + * Return structure for _mali_base_get_gpu_props(). + * + * NOTE: the raw_props member in this data structure contains the register + * values from which the value of the other members are derived. The derived + * members exist to allow for efficient access and/or shielding the details + * of the layout of the registers. 
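To make the earlier "population count" description concrete, here is a minimal sketch of the one-time group setup: num_cores is just a cached population count of core_mask. It assumes the per-group masks have already been split out of the raw SHADER_PRESENT/L2_PRESENT registers elsewhere; the function name is invented for illustration.

#include "mali_base_kernel.h"

static void example_fill_coherent_groups(
		struct mali_base_gpu_coherent_group_info *info,
		const u64 *group_masks, u32 num_groups)
{
	u32 i;

	if (num_groups > BASE_MAX_COHERENT_GROUPS)
		num_groups = BASE_MAX_COHERENT_GROUPS;

	info->num_groups = num_groups;
	for (i = 0; i < num_groups; i++) {
		info->group[i].core_mask = group_masks[i];
		/* hweight64() would be used in kernel code; the builtin
		 * keeps the sketch self-contained. */
		info->group[i].num_cores =
			(u16)__builtin_popcountll(group_masks[i]);
	}
}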
+ * + */ +typedef struct mali_base_gpu_props { + struct mali_base_gpu_core_props core_props; + struct mali_base_gpu_l2_cache_props l2_props; + u64 unused_1; /* keep for backwards compatibility */ + struct mali_base_gpu_tiler_props tiler_props; + struct mali_base_gpu_thread_props thread_props; + + /** This member is large, likely to be 128 bytes */ + struct gpu_raw_gpu_props raw_props; + + /** This must be last member of the structure */ + struct mali_base_gpu_coherent_group_info coherency_info; +} base_gpu_props; + +/** @} end group base_user_api_gpuprops_dyn */ + +/** @} end group base_user_api_gpuprops */ + +/** + * @addtogroup base_user_api_core User-side Base core APIs + * @{ + */ + +/** + * \enum base_context_create_flags + * + * Flags to pass to ::base_context_init. + * Flags can be ORed together to enable multiple things. + * + * These share the same space as BASEP_CONTEXT_FLAG_*, and so must + * not collide with them. + */ +enum base_context_create_flags { + /** No flags set */ + BASE_CONTEXT_CREATE_FLAG_NONE = 0, + + /** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */ + BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0), + + /** Base context is a 'System Monitor' context for Hardware counters. + * + * One important side effect of this is that job submission is disabled. */ + BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1) +}; + +/** + * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init() + */ +#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \ + (((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \ + ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)) + +/** + * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel + */ +#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \ + ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) + +/* + * Private flags used on the base context + * + * These start at bit 31, and run down to zero. + * + * They share the same space as @ref base_context_create_flags, and so must + * not collide with them. + */ +/** Private flag tracking whether job descriptor dumping is disabled */ +#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31)) + +/** @} end group base_user_api_core */ + +/** @} end group base_user_api */ + +/** + * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties + * @{ + * + * C Pre-processor macros are exposed here to do with Platform + * Config. + * + * These include: + * - GPU Properties that are constant on a particular Midgard Family + * Implementation e.g. Maximum samples per pixel on Mali-T600. + * - General platform config for the GPU, such as the GPU major and minor + * revison. + */ + +/** @} end group base_plat_config_gpuprops */ + +/** + * @addtogroup base_api Base APIs + * @{ + */ + +/** + * @brief The payload for a replay job. This must be in GPU memory. + */ +typedef struct base_jd_replay_payload { + /** + * Pointer to the first entry in the base_jd_replay_jc list. These + * will be replayed in @b reverse order (so that extra ones can be added + * to the head in future soft jobs without affecting this soft job) + */ + u64 tiler_jc_list; + + /** + * Pointer to the fragment job chain. + */ + u64 fragment_jc; + + /** + * Pointer to the tiler heap free FBD field to be modified. + */ + u64 tiler_heap_free; + + /** + * Hierarchy mask for the replayed fragment jobs. May be zero. + */ + u16 fragment_hierarchy_mask; + + /** + * Hierarchy mask for the replayed tiler jobs. May be zero. 
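The BASE_CONTEXT_CREATE_ALLOWED_FLAGS and BASE_CONTEXT_CREATE_KERNEL_FLAGS masks above split user-visible flags from those forwarded to the kernel. The sketch below shows one way a caller might use them; the function name and error convention are assumptions for illustration only.

#include "mali_base_kernel.h"

/* Illustration: reject any bit outside the allowed set, then keep only the
 * bits that may be passed to the kernel. */
static int example_validate_create_flags(u32 flags, u32 *kernel_flags)
{
	if (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS)
		return -1; /* unknown or private flag requested */

	*kernel_flags = flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS;
	return 0;
}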
+ */ + u16 tiler_hierarchy_mask; + + /** + * Default weight to be used for hierarchy levels not in the original + * mask. + */ + u32 hierarchy_default_weight; + + /** + * Core requirements for the tiler job chain + */ + base_jd_core_req tiler_core_req; + + /** + * Core requirements for the fragment job chain + */ + base_jd_core_req fragment_core_req; +} base_jd_replay_payload; + +#ifdef BASE_LEGACY_UK10_2_SUPPORT +typedef struct base_jd_replay_payload_uk10_2 { + u64 tiler_jc_list; + u64 fragment_jc; + u64 tiler_heap_free; + u16 fragment_hierarchy_mask; + u16 tiler_hierarchy_mask; + u32 hierarchy_default_weight; + u16 tiler_core_req; + u16 fragment_core_req; + u8 padding[4]; +} base_jd_replay_payload_uk10_2; +#endif /* BASE_LEGACY_UK10_2_SUPPORT */ + +/** + * @brief An entry in the linked list of job chains to be replayed. This must + * be in GPU memory. + */ +typedef struct base_jd_replay_jc { + /** + * Pointer to next entry in the list. A setting of NULL indicates the + * end of the list. + */ + u64 next; + + /** + * Pointer to the job chain. + */ + u64 jc; + +} base_jd_replay_jc; + +/* Maximum number of jobs allowed in a fragment chain in the payload of a + * replay job */ +#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256 + +/** @} end group base_api */ + +typedef struct base_profiling_controls { + u32 profiling_controls[FBDUMP_CONTROL_MAX]; +} base_profiling_controls; + +/* Enable additional tracepoints for latency measurements (TL_ATOM_READY, + * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */ +#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0) + +/* Indicate that job dumping is enabled. This could affect certain timers + * to account for the performance impact. */ +#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1) + +#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \ + BASE_TLSTREAM_JOB_DUMPING_ENABLED) + +#endif /* _BASE_KERNEL_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_base_mem_priv.h b/drivers/gpu/arm/midgard/mali_base_mem_priv.h new file mode 100644 index 00000000000000..4a98a72cc37a73 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_base_mem_priv.h @@ -0,0 +1,52 @@ +/* + * + * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#ifndef _BASE_MEM_PRIV_H_ +#define _BASE_MEM_PRIV_H_ + +#define BASE_SYNCSET_OP_MSYNC (1U << 0) +#define BASE_SYNCSET_OP_CSYNC (1U << 1) + +/* + * This structure describe a basic memory coherency operation. + * It can either be: + * @li a sync from CPU to Memory: + * - type = ::BASE_SYNCSET_OP_MSYNC + * - mem_handle = a handle to the memory object on which the operation + * is taking place + * - user_addr = the address of the range to be synced + * - size = the amount of data to be synced, in bytes + * - offset is ignored. + * @li a sync from Memory to CPU: + * - type = ::BASE_SYNCSET_OP_CSYNC + * - mem_handle = a handle to the memory object on which the operation + * is taking place + * - user_addr = the address of the range to be synced + * - size = the amount of data to be synced, in bytes. + * - offset is ignored. 
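Following the field usage listed above, a CPU-to-memory sync only needs the handle, address, size and type to be filled in. The sketch below assumes base_mem_handle is provided by the base headers and that the handle, address and size come from an earlier allocation; nothing here performs the actual sync.

#include "mali_base_mem_priv.h"

static struct basep_syncset example_prepare_msync(base_mem_handle handle,
		u64 user_addr, u64 size)
{
	struct basep_syncset ss = {
		.mem_handle = handle,
		.user_addr = user_addr,
		.size = size,
		.type = BASE_SYNCSET_OP_MSYNC,
		/* remaining members, including padding, are zero-initialised */
	};

	return ss;
}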
+ */ +struct basep_syncset { + base_mem_handle mem_handle; + u64 user_addr; + u64 size; + u8 type; + u8 padding[7]; +}; + +#endif diff --git a/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h b/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h new file mode 100644 index 00000000000000..be454a216a3999 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_base_vendor_specific_func.h @@ -0,0 +1,24 @@ +/* + * + * (C) COPYRIGHT 2010, 2012-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +#ifndef _BASE_VENDOR_SPEC_FUNC_H_ +#define _BASE_VENDOR_SPEC_FUNC_H_ + +int kbase_get_vendor_specific_cpu_clock_speed(u32 * const); + +#endif /*_BASE_VENDOR_SPEC_FUNC_H_*/ diff --git a/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/midgard/mali_kbase.h new file mode 100644 index 00000000000000..19d82b7f830cd3 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase.h @@ -0,0 +1,612 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#ifndef _KBASE_H_ +#define _KBASE_H_ + +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#include +#endif +#include +#include +#include +#include +#include + +#include "mali_base_kernel.h" +#include +#include + +/* + * Include mali_kbase_defs.h first as this provides types needed by other local + * header files. 
+ */ +#include "mali_kbase_defs.h" + +#include "mali_kbase_context.h" +#include "mali_kbase_strings.h" +#include "mali_kbase_mem_lowlevel.h" +#include "mali_kbase_trace_timeline.h" +#include "mali_kbase_js.h" +#include "mali_kbase_mem.h" +#include "mali_kbase_utility.h" +#include "mali_kbase_gpu_memory_debugfs.h" +#include "mali_kbase_mem_profile_debugfs.h" +#include "mali_kbase_debug_job_fault.h" +#include "mali_kbase_jd_debugfs.h" +#include "mali_kbase_gpuprops.h" +#include "mali_kbase_jm.h" +#include "mali_kbase_vinstr.h" + +#include "ipa/mali_kbase_ipa.h" + +#ifdef CONFIG_GPU_TRACEPOINTS +#include +#endif + +#ifndef u64_to_user_ptr +/* Introduced in Linux v4.6 */ +#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)x) +#endif + +/* + * Kernel-side Base (KBase) APIs + */ + +struct kbase_device *kbase_device_alloc(void); +/* +* note: configuration attributes member of kbdev needs to have +* been setup before calling kbase_device_init +*/ + +/* +* API to acquire device list semaphore and return pointer +* to the device list head +*/ +const struct list_head *kbase_dev_list_get(void); +/* API to release the device list semaphore */ +void kbase_dev_list_put(const struct list_head *dev_list); + +int kbase_device_init(struct kbase_device * const kbdev); +void kbase_device_term(struct kbase_device *kbdev); +void kbase_device_free(struct kbase_device *kbdev); +int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature); + +/* Needed for gator integration and for reporting vsync information */ +struct kbase_device *kbase_find_device(int minor); +void kbase_release_device(struct kbase_device *kbdev); + +void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value); + +struct kbase_context * +kbase_create_context(struct kbase_device *kbdev, bool is_compat); +void kbase_destroy_context(struct kbase_context *kctx); + +int kbase_jd_init(struct kbase_context *kctx); +void kbase_jd_exit(struct kbase_context *kctx); + +/** + * kbase_jd_submit - Submit atoms to the job dispatcher + * + * @kctx: The kbase context to submit to + * @user_addr: The address in user space of the struct base_jd_atom_v2 array + * @nr_atoms: The number of atoms in the array + * @stride: sizeof(struct base_jd_atom_v2) + * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6) + * + * Return: 0 on success or error code + */ +int kbase_jd_submit(struct kbase_context *kctx, + void __user *user_addr, u32 nr_atoms, u32 stride, + bool uk6_atom); + +/** + * kbase_jd_done_worker - Handle a job completion + * @data: a &struct work_struct + * + * This function requeues the job from the runpool (if it was soft-stopped or + * removed from NEXT registers). + * + * Removes it from the system if it finished/failed/was cancelled. + * + * Resolves dependencies to add dependent jobs to the context, potentially + * starting them if necessary (which may add more references to the context) + * + * Releases the reference to the context from the no-longer-running job. + * + * Handles retrying submission outside of IRQ context if it failed from within + * IRQ context. 
+ */ +void kbase_jd_done_worker(struct work_struct *data); + +void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp, + kbasep_js_atom_done_code done_code); +void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom); +void kbase_jd_zap_context(struct kbase_context *kctx); +bool jd_done_nolock(struct kbase_jd_atom *katom, + struct list_head *completed_jobs_ctx); +void kbase_jd_free_external_resources(struct kbase_jd_atom *katom); +bool jd_submit_atom(struct kbase_context *kctx, + const struct base_jd_atom_v2 *user_atom, + struct kbase_jd_atom *katom); +void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom); + +void kbase_job_done(struct kbase_device *kbdev, u32 done); + +/** + * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms + * and soft stop them + * @kctx: Pointer to context to check. + * @katom: Pointer to priority atom. + * + * Atoms from @kctx on the same job slot as @katom, which have lower priority + * than @katom will be soft stopped and put back in the queue, so that atoms + * with higher priority can run. + * + * The hwaccess_lock must be held when calling this function. + */ +void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, + struct kbase_jd_atom *katom); + +void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, + struct kbase_jd_atom *target_katom); +void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js, + struct kbase_jd_atom *target_katom, u32 sw_flags); +void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, + struct kbase_jd_atom *target_katom); +void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action, + base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom); +void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, + struct kbase_jd_atom *target_katom); + +void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event); +int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent); +int kbase_event_pending(struct kbase_context *ctx); +int kbase_event_init(struct kbase_context *kctx); +void kbase_event_close(struct kbase_context *kctx); +void kbase_event_cleanup(struct kbase_context *kctx); +void kbase_event_wakeup(struct kbase_context *kctx); + +int kbase_process_soft_job(struct kbase_jd_atom *katom); +int kbase_prepare_soft_job(struct kbase_jd_atom *katom); +void kbase_finish_soft_job(struct kbase_jd_atom *katom); +void kbase_cancel_soft_job(struct kbase_jd_atom *katom); +void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev); +void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom); +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom); +#endif +int kbase_soft_event_update(struct kbase_context *kctx, + u64 event, + unsigned char new_status); + +bool kbase_replay_process(struct kbase_jd_atom *katom); + +void kbasep_soft_job_timeout_worker(struct timer_list *t); +void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt); + +/* api used internally for register access. 
Contains validation and tracing */ +void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value); +int kbase_device_trace_buffer_install( + struct kbase_context *kctx, u32 *tb, size_t size); +void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx); + +void kbasep_as_do_poke(struct work_struct *work); + +/** Returns the name associated with a Mali exception code + * + * This function is called from the interrupt handler when a GPU fault occurs. + * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN. + * + * @param[in] kbdev The kbase device that the GPU fault occurred from. + * @param[in] exception_code exception code + * @return name associated with the exception code + */ +const char *kbase_exception_name(struct kbase_device *kbdev, + u32 exception_code); + +/** + * Check whether a system suspend is in progress, or has already been suspended + * + * The caller should ensure that either kbdev->pm.active_count_lock is held, or + * a dmb was executed recently (to ensure the value is most + * up-to-date). However, without a lock the value could change afterwards. + * + * @return false if a suspend is not in progress + * @return !=false otherwise + */ +static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev) +{ + return kbdev->pm.suspending; +} + +/** + * Return the atom's ID, as was originally supplied by userspace in + * base_jd_atom_v2::atom_number + */ +static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + int result; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(katom); + KBASE_DEBUG_ASSERT(katom->kctx == kctx); + + result = katom - &kctx->jctx.atoms[0]; + KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT); + return result; +} + +/** + * kbase_jd_atom_from_id - Return the atom structure for the given atom ID + * @kctx: Context pointer + * @id: ID of atom to retrieve + * + * Return: Pointer to struct kbase_jd_atom associated with the supplied ID + */ +static inline struct kbase_jd_atom *kbase_jd_atom_from_id( + struct kbase_context *kctx, int id) +{ + return &kctx->jctx.atoms[id]; +} + +/** + * Initialize the disjoint state + * + * The disjoint event count and state are both set to zero. + * + * Disjoint functions usage: + * + * The disjoint event count should be incremented whenever a disjoint event occurs. + * + * There are several cases which are regarded as disjoint behavior. Rather than just increment + * the counter during disjoint events we also increment the counter when jobs may be affected + * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state. + * + * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying + * (as part of the replay workaround). Increasing the disjoint state also increases the count of + * disjoint events. + * + * The disjoint state is then used to increase the count of disjoint events during job submission + * and job completion. Any atom submitted or completed while the disjoint state is greater than + * zero is regarded as a disjoint event. + * + * The disjoint event counter is also incremented immediately whenever a job is soft stopped + * and during context creation. 
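The two helpers above, kbase_jd_atom_id() and kbase_jd_atom_from_id(), are inverses of each other. The small check below is purely an illustration of that invariant, assuming the declarations above are in scope; it is not driver code.

static inline void example_check_atom_id_roundtrip(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	int id = kbase_jd_atom_id(kctx, katom);

	KBASE_DEBUG_ASSERT(kbase_jd_atom_from_id(kctx, id) == katom);
}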
+ * + * @param kbdev The kbase device + */ +void kbase_disjoint_init(struct kbase_device *kbdev); + +/** + * Increase the count of disjoint events + * called when a disjoint event has happened + * + * @param kbdev The kbase device + */ +void kbase_disjoint_event(struct kbase_device *kbdev); + +/** + * Increase the count of disjoint events only if the GPU is in a disjoint state + * + * This should be called when something happens which could be disjoint if the GPU + * is in a disjoint state. The state refcount keeps track of this. + * + * @param kbdev The kbase device + */ +void kbase_disjoint_event_potential(struct kbase_device *kbdev); + +/** + * Returns the count of disjoint events + * + * @param kbdev The kbase device + * @return the count of disjoint events + */ +u32 kbase_disjoint_event_get(struct kbase_device *kbdev); + +/** + * Increment the refcount state indicating that the GPU is in a disjoint state. + * + * Also Increment the disjoint event count (calls @ref kbase_disjoint_event) + * eventually after the disjoint state has completed @ref kbase_disjoint_state_down + * should be called + * + * @param kbdev The kbase device + */ +void kbase_disjoint_state_up(struct kbase_device *kbdev); + +/** + * Decrement the refcount state + * + * Also Increment the disjoint event count (calls @ref kbase_disjoint_event) + * + * Called after @ref kbase_disjoint_state_up once the disjoint state is over + * + * @param kbdev The kbase device + */ +void kbase_disjoint_state_down(struct kbase_device *kbdev); + +/** + * If a job is soft stopped and the number of contexts is >= this value + * it is reported as a disjoint event + */ +#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2 + +#if !defined(UINT64_MAX) + #define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL) +#endif + +#if KBASE_TRACE_ENABLE +void kbasep_trace_debugfs_init(struct kbase_device *kbdev); + +#ifndef CONFIG_MALI_SYSTEM_TRACE +/** Add trace values about a job-slot + * + * @note Any functions called through this macro will still be evaluated in + * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any + * functions called to get the parameters supplied to this macro must: + * - be static or static inline + * - must just return 0 and have no other statements present in the body. + */ +#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \ + kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ + KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0) + +/** Add trace values about a job-slot, with info + * + * @note Any functions called through this macro will still be evaluated in + * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any + * functions called to get the parameters supplied to this macro must: + * - be static or static inline + * - must just return 0 and have no other statements present in the body. + */ +#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \ + kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ + KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val) + +/** Add trace values about a ctx refcount + * + * @note Any functions called through this macro will still be evaluated in + * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any + * functions called to get the parameters supplied to this macro must: + * - be static or static inline + * - must just return 0 and have no other statements present in the body. 
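The disjoint-state functions above are meant to be used as a bracket around disruptive operations. The sketch below shows that pattern with a GPU reset as the example; the surrounding reset logic is omitted and the function name is invented for illustration.

static void example_disjoint_usage(struct kbase_device *kbdev)
{
	kbase_disjoint_state_up(kbdev);	/* also counts one disjoint event */

	/* ... reset the GPU or replay an atom here ... */

	/* work submitted or completed meanwhile would be reported via: */
	kbase_disjoint_event_potential(kbdev);

	kbase_disjoint_state_down(kbdev); /* counts another event */
}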
+ */ +#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \ + kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ + KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0) +/** Add trace values about a ctx refcount, and info + * + * @note Any functions called through this macro will still be evaluated in + * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any + * functions called to get the parameters supplied to this macro must: + * - be static or static inline + * - must just return 0 and have no other statements present in the body. + */ +#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \ + kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ + KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val) + +/** Add trace values (no slot or refcount) + * + * @note Any functions called through this macro will still be evaluated in + * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any + * functions called to get the parameters supplied to this macro must: + * - be static or static inline + * - must just return 0 and have no other statements present in the body. + */ +#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \ + kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \ + 0, 0, 0, info_val) + +/** Clear the trace */ +#define KBASE_TRACE_CLEAR(kbdev) \ + kbasep_trace_clear(kbdev) + +/** Dump the slot trace */ +#define KBASE_TRACE_DUMP(kbdev) \ + kbasep_trace_dump(kbdev) + +/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */ +void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val); +/** PRIVATE - do not use directly. 
Use KBASE_TRACE_CLEAR() instead */ +void kbasep_trace_clear(struct kbase_device *kbdev); +#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */ +/* Dispatch kbase trace events as system trace events */ +#include +#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\ + trace_mali_##code(jobslot, 0) + +#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\ + trace_mali_##code(jobslot, info_val) + +#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\ + trace_mali_##code(refcount, 0) + +#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\ + trace_mali_##code(refcount, info_val) + +#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\ + trace_mali_##code(gpu_addr, info_val) + +#define KBASE_TRACE_CLEAR(kbdev)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(0);\ + } while (0) +#define KBASE_TRACE_DUMP(kbdev)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(0);\ + } while (0) + +#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */ +#else +#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(code);\ + CSTD_UNUSED(ctx);\ + CSTD_UNUSED(katom);\ + CSTD_UNUSED(gpu_addr);\ + CSTD_UNUSED(jobslot);\ + } while (0) + +#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(code);\ + CSTD_UNUSED(ctx);\ + CSTD_UNUSED(katom);\ + CSTD_UNUSED(gpu_addr);\ + CSTD_UNUSED(jobslot);\ + CSTD_UNUSED(info_val);\ + CSTD_NOP(0);\ + } while (0) + +#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(code);\ + CSTD_UNUSED(ctx);\ + CSTD_UNUSED(katom);\ + CSTD_UNUSED(gpu_addr);\ + CSTD_UNUSED(refcount);\ + CSTD_NOP(0);\ + } while (0) + +#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(code);\ + CSTD_UNUSED(ctx);\ + CSTD_UNUSED(katom);\ + CSTD_UNUSED(gpu_addr);\ + CSTD_UNUSED(info_val);\ + CSTD_NOP(0);\ + } while (0) + +#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(code);\ + CSTD_UNUSED(subcode);\ + CSTD_UNUSED(ctx);\ + CSTD_UNUSED(katom);\ + CSTD_UNUSED(val);\ + CSTD_NOP(0);\ + } while (0) + +#define KBASE_TRACE_CLEAR(kbdev)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(0);\ + } while (0) +#define KBASE_TRACE_DUMP(kbdev)\ + do {\ + CSTD_UNUSED(kbdev);\ + CSTD_NOP(0);\ + } while (0) +#endif /* KBASE_TRACE_ENABLE */ +/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */ +void kbasep_trace_dump(struct kbase_device *kbdev); + +#ifdef CONFIG_MALI_DEBUG +/** + * kbase_set_driver_inactive - Force driver to go inactive + * @kbdev: Device pointer + * @inactive: true if driver should go inactive, false otherwise + * + * Forcing the driver inactive will cause all future IOCTLs to wait until the + * driver is made active again. This is intended solely for the use of tests + * which require that no jobs are running while the test executes. 
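The notes attached to the KBASE_TRACE_ADD* macros above require that any helper used only to supply a macro parameter be a trivial static inline returning 0, so it costs nothing when KBASE_TRACE_ENABLE is 0. A minimal sketch of such a helper, with an invented name:

/* It would be used as:
 *   KBASE_TRACE_ADD(kbdev, code, kctx, NULL, 0u, example_trace_info_val());
 */
static inline unsigned long example_trace_info_val(void)
{
	return 0;
}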
+ */ +void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive); +#endif /* CONFIG_MALI_DEBUG */ + + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) + +/* kbase_io_history_init - initialize data struct for register access history + * + * @kbdev The register history to initialize + * @n The number of register accesses that the buffer could hold + * + * @return 0 if successfully initialized, failure otherwise + */ +int kbase_io_history_init(struct kbase_io_history *h, u16 n); + +/* kbase_io_history_term - uninit all resources for the register access history + * + * @h The register history to terminate + */ +void kbase_io_history_term(struct kbase_io_history *h); + +/* kbase_io_history_dump - print the register history to the kernel ring buffer + * + * @kbdev Pointer to kbase_device containing the register history to dump + */ +void kbase_io_history_dump(struct kbase_device *kbdev); + +/** + * kbase_io_history_resize - resize the register access history buffer. + * + * @h: Pointer to a valid register history to resize + * @new_size: Number of accesses the buffer could hold + * + * A successful resize will clear all recent register accesses. + * If resizing fails for any reason (e.g., could not allocate memory, invalid + * buffer size) then the original buffer will be kept intact. + * + * @return 0 if the buffer was resized, failure otherwise + */ +int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size); + +#else /* CONFIG_DEBUG_FS */ + +#define kbase_io_history_init(...) ((int)0) + +#define kbase_io_history_term CSTD_NOP + +#define kbase_io_history_dump CSTD_NOP + +#define kbase_io_history_resize CSTD_NOP + +#endif /* CONFIG_DEBUG_FS */ + + +#endif + + + diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c new file mode 100644 index 00000000000000..6b3559d933510b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c @@ -0,0 +1,210 @@ +/* + * + * (C) COPYRIGHT 2013-2015,2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + +#include +#include +#include + +/* This function is used to solve an HW issue with single iterator GPUs. + * If a fragment job is soft-stopped on the edge of its bounding box, can happen that the + * restart index is out of bounds and the rerun causes a tile range fault. If this happens + * we try to clamp the restart index to a correct value and rerun the job. 
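Stepping back to the register access history API declared earlier in mali_kbase.h, the sketch below shows its lifecycle, assuming CONFIG_DEBUG_FS is enabled and CONFIG_MALI_NO_MALI is not (so the real implementations are used). The buffer sizes are arbitrary placeholders.

static int example_io_history_lifecycle(struct kbase_device *kbdev,
		struct kbase_io_history *h)
{
	int err = kbase_io_history_init(h, 256);

	if (err)
		return err;

	/* ... register accesses are recorded elsewhere ... */

	kbase_io_history_dump(kbdev);		/* print to the kernel log */
	err = kbase_io_history_resize(h, 512);	/* grow; clears recent entries */

	kbase_io_history_term(h);
	return err;
}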
+ */ +/* Mask of X and Y coordinates for the coordinates words in the descriptors*/ +#define X_COORDINATE_MASK 0x00000FFF +#define Y_COORDINATE_MASK 0x0FFF0000 +/* Max number of words needed from the fragment shader job descriptor */ +#define JOB_HEADER_SIZE_IN_WORDS 10 +#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32)) + +/* Word 0: Status Word */ +#define JOB_DESC_STATUS_WORD 0 +/* Word 1: Restart Index */ +#define JOB_DESC_RESTART_INDEX_WORD 1 +/* Word 2: Fault address low word */ +#define JOB_DESC_FAULT_ADDR_LOW_WORD 2 +/* Word 8: Minimum Tile Coordinates */ +#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8 +/* Word 9: Maximum Tile Coordinates */ +#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9 + +int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom) +{ + struct device *dev = katom->kctx->kbdev->dev; + u32 clamped = 0; + struct kbase_va_region *region; + struct tagged_addr *page_array; + u64 page_index; + u32 offset = katom->jc & (~PAGE_MASK); + u32 *page_1 = NULL; + u32 *page_2 = NULL; + u32 job_header[JOB_HEADER_SIZE_IN_WORDS]; + void *dst = job_header; + u32 minX, minY, maxX, maxY; + u32 restartX, restartY; + struct page *p; + u32 copy_size; + + dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n"); + if (!(katom->core_req & BASE_JD_REQ_FS)) + return 0; + + kbase_gpu_vm_lock(katom->kctx); + region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, + katom->jc); + if (!region || (region->flags & KBASE_REG_FREE)) + goto out_unlock; + + page_array = kbase_get_cpu_phy_pages(region); + if (!page_array) + goto out_unlock; + + page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn; + + p = phys_to_page(as_phys_addr_t(page_array[page_index])); + + /* we need the first 10 words of the fragment shader job descriptor. + * We need to check that the offset + 10 words is less that the page + * size otherwise we need to load the next page. + * page_size_overflow will be equal to 0 in case the whole descriptor + * is within the page > 0 otherwise. 
+ */ + copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE); + + page_1 = kmap_atomic(p); + + /* page_1 is a u32 pointer, offset is expressed in bytes */ + page_1 += offset>>2; + + kbase_sync_single_for_cpu(katom->kctx->kbdev, + kbase_dma_addr(p) + offset, + copy_size, DMA_BIDIRECTIONAL); + + memcpy(dst, page_1, copy_size); + + /* The data needed overflows page the dimension, + * need to map the subsequent page */ + if (copy_size < JOB_HEADER_SIZE) { + p = phys_to_page(as_phys_addr_t(page_array[page_index + 1])); + page_2 = kmap_atomic(p); + + kbase_sync_single_for_cpu(katom->kctx->kbdev, + kbase_dma_addr(p), + JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL); + + memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size); + } + + /* We managed to correctly map one or two pages (in case of overflow) */ + /* Get Bounding Box data and restart index from fault address low word */ + minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK; + minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK; + maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK; + maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK; + restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK; + restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK; + + dev_warn(dev, "Before Clamping:\n" + "Jobstatus: %08x\n" + "restartIdx: %08x\n" + "Fault_addr_low: %08x\n" + "minCoordsX: %08x minCoordsY: %08x\n" + "maxCoordsX: %08x maxCoordsY: %08x\n", + job_header[JOB_DESC_STATUS_WORD], + job_header[JOB_DESC_RESTART_INDEX_WORD], + job_header[JOB_DESC_FAULT_ADDR_LOW_WORD], + minX, minY, + maxX, maxY); + + /* Set the restart index to the one which generated the fault*/ + job_header[JOB_DESC_RESTART_INDEX_WORD] = + job_header[JOB_DESC_FAULT_ADDR_LOW_WORD]; + + if (restartX < minX) { + job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY; + dev_warn(dev, + "Clamping restart X index to minimum. %08x clamped to %08x\n", + restartX, minX); + clamped = 1; + } + if (restartY < minY) { + job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX; + dev_warn(dev, + "Clamping restart Y index to minimum. %08x clamped to %08x\n", + restartY, minY); + clamped = 1; + } + if (restartX > maxX) { + job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY; + dev_warn(dev, + "Clamping restart X index to maximum. %08x clamped to %08x\n", + restartX, maxX); + clamped = 1; + } + if (restartY > maxY) { + job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX; + dev_warn(dev, + "Clamping restart Y index to maximum. 
%08x clamped to %08x\n", + restartY, maxY); + clamped = 1; + } + + if (clamped) { + /* Reset the fault address low word + * and set the job status to STOPPED */ + job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0; + job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED; + dev_warn(dev, "After Clamping:\n" + "Jobstatus: %08x\n" + "restartIdx: %08x\n" + "Fault_addr_low: %08x\n" + "minCoordsX: %08x minCoordsY: %08x\n" + "maxCoordsX: %08x maxCoordsY: %08x\n", + job_header[JOB_DESC_STATUS_WORD], + job_header[JOB_DESC_RESTART_INDEX_WORD], + job_header[JOB_DESC_FAULT_ADDR_LOW_WORD], + minX, minY, + maxX, maxY); + + /* Flush CPU cache to update memory for future GPU reads*/ + memcpy(page_1, dst, copy_size); + p = phys_to_page(as_phys_addr_t(page_array[page_index])); + + kbase_sync_single_for_device(katom->kctx->kbdev, + kbase_dma_addr(p) + offset, + copy_size, DMA_TO_DEVICE); + + if (copy_size < JOB_HEADER_SIZE) { + memcpy(page_2, dst + copy_size, + JOB_HEADER_SIZE - copy_size); + p = phys_to_page(as_phys_addr_t(page_array[page_index + + 1])); + + kbase_sync_single_for_device(katom->kctx->kbdev, + kbase_dma_addr(p), + JOB_HEADER_SIZE - copy_size, + DMA_TO_DEVICE); + } + } + if (copy_size < JOB_HEADER_SIZE) + kunmap_atomic(page_2); + + kunmap_atomic(page_1); + +out_unlock: + kbase_gpu_vm_unlock(katom->kctx); + return clamped; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h new file mode 100644 index 00000000000000..099a29861672e6 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h @@ -0,0 +1,23 @@ +/* + * + * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_10969_WORKAROUND_ +#define _KBASE_10969_WORKAROUND_ + +int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom); + +#endif /* _KBASE_10969_WORKAROUND_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c new file mode 100644 index 00000000000000..f910fe970feb5f --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c @@ -0,0 +1,102 @@ +/* + * + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include + +#include +#include + +#ifdef CONFIG_DEBUG_FS +#ifdef CONFIG_MALI_DEBUG + +static int kbase_as_fault_read(struct seq_file *sfile, void *data) +{ + uintptr_t as_no = (uintptr_t) sfile->private; + + struct list_head *entry; + const struct list_head *kbdev_list; + struct kbase_device *kbdev = NULL; + + kbdev_list = kbase_dev_list_get(); + + list_for_each(entry, kbdev_list) { + kbdev = list_entry(entry, struct kbase_device, entry); + + if(kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) { + + /* don't show this one again until another fault occors */ + kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no); + + /* output the last page fault addr */ + seq_printf(sfile, "%llu\n", (u64) kbdev->as[as_no].fault_addr); + } + + } + + kbase_dev_list_put(kbdev_list); + + return 0; +} + +static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file) +{ + return single_open(file, kbase_as_fault_read , in->i_private); +} + +static const struct file_operations as_fault_fops = { + .open = kbase_as_fault_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#endif /* CONFIG_MALI_DEBUG */ +#endif /* CONFIG_DEBUG_FS */ + +/* + * Initialize debugfs entry for each address space + */ +void kbase_as_fault_debugfs_init(struct kbase_device *kbdev) +{ +#ifdef CONFIG_DEBUG_FS +#ifdef CONFIG_MALI_DEBUG + uint i; + char as_name[64]; + struct dentry *debugfs_directory; + + kbdev->debugfs_as_read_bitmap = 0ULL; + + KBASE_DEBUG_ASSERT(kbdev->nr_hw_address_spaces); + KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].fault_addr) == sizeof(u64)); + + debugfs_directory = debugfs_create_dir("address_spaces", + kbdev->mali_debugfs_directory); + + if(debugfs_directory) { + for(i = 0; i < kbdev->nr_hw_address_spaces; i++) { + snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i); + debugfs_create_file(as_name, S_IRUGO, + debugfs_directory, (void*) ((uintptr_t) i), &as_fault_fops); + } + } + else + dev_warn(kbdev->dev, "unable to create address_spaces debugfs directory"); + +#endif /* CONFIG_MALI_DEBUG */ +#endif /* CONFIG_DEBUG_FS */ + return; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h new file mode 100644 index 00000000000000..3ed2248897fc1b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h @@ -0,0 +1,45 @@ +/* + * + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#ifndef _KBASE_AS_FAULT_DEBUG_FS_H +#define _KBASE_AS_FAULT_DEBUG_FS_H + +/** + * kbase_as_fault_debugfs_init() - Add debugfs files for reporting page faults + * + * @kbdev: Pointer to kbase_device + */ +void kbase_as_fault_debugfs_init(struct kbase_device *kbdev); + +/** + * kbase_as_fault_debugfs_new() - make the last fault available on debugfs + * + * @kbdev: Pointer to kbase_device + * @as_no: The address space the fault occurred on + */ +static inline void +kbase_as_fault_debugfs_new(struct kbase_device *kbdev, int as_no) +{ +#ifdef CONFIG_DEBUG_FS +#ifdef CONFIG_MALI_DEBUG + kbdev->debugfs_as_read_bitmap |= (1ULL << as_no); +#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_MALI_DEBUG */ + return; +} + +#endif /*_KBASE_AS_FAULT_DEBUG_FS_H*/ diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c new file mode 100644 index 00000000000000..1d11de67aa8058 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c @@ -0,0 +1,54 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Cache Policy API. + */ + +#include "mali_kbase_cache_policy.h" + +/* + * The output flags should be a combination of the following values: + * KBASE_REG_CPU_CACHED: CPU cache should be enabled. + */ +u32 kbase_cache_enabled(u32 flags, u32 nr_pages) +{ + u32 cache_flags = 0; + + CSTD_UNUSED(nr_pages); + + if (flags & BASE_MEM_CACHED_CPU) + cache_flags |= KBASE_REG_CPU_CACHED; + + return cache_flags; +} + + +void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + dma_sync_single_for_device(kbdev->dev, handle, size, dir); +} + + +void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(kbdev->dev, handle, size, dir); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h new file mode 100644 index 00000000000000..0c18bdb357b0a6 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h @@ -0,0 +1,45 @@ +/* + * + * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Cache Policy API. 
+ */ + +#ifndef _KBASE_CACHE_POLICY_H_ +#define _KBASE_CACHE_POLICY_H_ + +#include "mali_kbase.h" +#include "mali_base_kernel.h" + +/** + * kbase_cache_enabled - Choose the cache policy for a specific region + * @flags: flags describing attributes of the region + * @nr_pages: total number of pages (backed or not) for the region + * + * Tells whether the CPU and GPU caches should be enabled or not for a specific + * region. + * This function can be modified to customize the cache policy depending on the + * flags and size of the region. + * + * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED + * depending on the cache policy + */ +u32 kbase_cache_enabled(u32 flags, u32 nr_pages); + +#endif /* _KBASE_CACHE_POLICY_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.c b/drivers/gpu/arm/midgard/mali_kbase_config.c new file mode 100644 index 00000000000000..fb615ae02ead96 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_config.c @@ -0,0 +1,51 @@ +/* + * + * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include +#include +#include + +int kbasep_platform_device_init(struct kbase_device *kbdev) +{ + struct kbase_platform_funcs_conf *platform_funcs_p; + + platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS; + if (platform_funcs_p && platform_funcs_p->platform_init_func) + return platform_funcs_p->platform_init_func(kbdev); + + return 0; +} + +void kbasep_platform_device_term(struct kbase_device *kbdev) +{ + struct kbase_platform_funcs_conf *platform_funcs_p; + + platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS; + if (platform_funcs_p && platform_funcs_p->platform_term_func) + platform_funcs_p->platform_term_func(kbdev); +} + +int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed) +{ + KBASE_DEBUG_ASSERT(NULL != clock_speed); + + *clock_speed = 100; + return 0; +} + diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.h b/drivers/gpu/arm/midgard/mali_kbase_config.h new file mode 100644 index 00000000000000..212e3b14d96c81 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_config.h @@ -0,0 +1,343 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/** + * @file mali_kbase_config.h + * Configuration API and Attributes for KBase + */ + +#ifndef _KBASE_CONFIG_H_ +#define _KBASE_CONFIG_H_ + +#include + +#include +#include + +/** + * @addtogroup base_api + * @{ + */ + +/** + * @addtogroup base_kbase_api + * @{ + */ + +/** + * @addtogroup kbase_config Configuration API and Attributes + * @{ + */ + +#include + +/* Forward declaration of struct kbase_device */ +struct kbase_device; + +/** + * kbase_platform_funcs_conf - Specifies platform init/term function pointers + * + * Specifies the functions pointers for platform specific initialization and + * termination. By default no functions are required. No additional platform + * specific control is necessary. + */ +struct kbase_platform_funcs_conf { + /** + * platform_init_func - platform specific init function pointer + * @kbdev - kbase_device pointer + * + * Returns 0 on success, negative error code otherwise. + * + * Function pointer for platform specific initialization or NULL if no + * initialization function is required. At the point this the GPU is + * not active and its power and clocks are in unknown (platform specific + * state) as kbase doesn't yet have control of power and clocks. + * + * The platform specific private pointer kbase_device::platform_context + * can be accessed (and possibly initialized) in here. + */ + int (*platform_init_func)(struct kbase_device *kbdev); + /** + * platform_term_func - platform specific termination function pointer + * @kbdev - kbase_device pointer + * + * Function pointer for platform specific termination or NULL if no + * termination function is required. At the point this the GPU will be + * idle but still powered and clocked. + * + * The platform specific private pointer kbase_device::platform_context + * can be accessed (and possibly terminated) in here. + */ + void (*platform_term_func)(struct kbase_device *kbdev); +}; + +/* + * @brief Specifies the callbacks for power management + * + * By default no callbacks will be made and the GPU must not be powered off. + */ +struct kbase_pm_callback_conf { + /** Callback for when the GPU is idle and the power to it can be switched off. + * + * The system integrator can decide whether to either do nothing, just switch off + * the clocks to the GPU, or to completely power down the GPU. + * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the + * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf). + */ + void (*power_off_callback)(struct kbase_device *kbdev); + + /** Callback for when the GPU is about to become active and power must be supplied. + * + * This function must not return until the GPU is powered and clocked sufficiently for register access to + * succeed. The return value specifies whether the GPU was powered down since the call to power_off_callback. + * If the GPU state has been lost then this function must return 1, otherwise it should return 0. + * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the + * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf). + * + * The return value of the first call to this function is ignored. + * + * @return 1 if the GPU state may have been lost, 0 otherwise. 
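A platform integration would provide a kbase_platform_funcs_conf like the one documented above, wired up the way kbasep_platform_device_init() expects (PLATFORM_FUNCS would need to resolve to such a structure). The sketch below uses placeholder callback bodies and invented names.

static int example_platform_init(struct kbase_device *kbdev)
{
	/* e.g. claim clocks/regulators and stash state in
	 * kbdev->platform_context */
	return 0;
}

static void example_platform_term(struct kbase_device *kbdev)
{
	/* release whatever example_platform_init() set up */
}

struct kbase_platform_funcs_conf example_platform_funcs = {
	.platform_init_func = example_platform_init,
	.platform_term_func = example_platform_term,
};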
+ */ + int (*power_on_callback)(struct kbase_device *kbdev); + + /** Callback for when the system is requesting a suspend and GPU power + * must be switched off. + * + * Note that if this callback is present, then this may be called + * without a preceding call to power_off_callback. Therefore this + * callback must be able to take any action that might otherwise happen + * in power_off_callback. + * + * The platform specific private pointer kbase_device::platform_context + * can be accessed and modified in here. It is the platform \em + * callbacks responsibility to initialize and terminate this pointer if + * used (see @ref kbase_platform_funcs_conf). + */ + void (*power_suspend_callback)(struct kbase_device *kbdev); + + /** Callback for when the system is resuming from a suspend and GPU + * power must be switched on. + * + * Note that if this callback is present, then this may be called + * without a following call to power_on_callback. Therefore this + * callback must be able to take any action that might otherwise happen + * in power_on_callback. + * + * The platform specific private pointer kbase_device::platform_context + * can be accessed and modified in here. It is the platform \em + * callbacks responsibility to initialize and terminate this pointer if + * used (see @ref kbase_platform_funcs_conf). + */ + void (*power_resume_callback)(struct kbase_device *kbdev); + + /** Callback for handling runtime power management initialization. + * + * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback + * will become active from calls made to the OS from within this function. + * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback. + * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. + * + * @return 0 on success, else int error code. + */ + int (*power_runtime_init_callback)(struct kbase_device *kbdev); + + /** Callback for handling runtime power management termination. + * + * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback + * should no longer be called by the OS on completion of this function. + * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. + */ + void (*power_runtime_term_callback)(struct kbase_device *kbdev); + + /** Callback for runtime power-off power management callback + * + * For linux this callback will be called by the kernel runtime_suspend callback. + * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. + * + * @return 0 on success, else OS error code. + */ + void (*power_runtime_off_callback)(struct kbase_device *kbdev); + + /** Callback for runtime power-on power management callback + * + * For linux this callback will be called by the kernel runtime_resume callback. + * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature. + */ + int (*power_runtime_on_callback)(struct kbase_device *kbdev); + + /* + * Optional callback for checking if GPU can be suspended when idle + * + * This callback will be called by the runtime power management core + * when the reference count goes to 0 to provide notification that the + * GPU now seems idle. + * + * If this callback finds that the GPU can't be powered off, or handles + * suspend by powering off directly or queueing up a power off, a + * non-zero value must be returned to prevent the runtime PM core from + * also triggering a suspend. 
+ *
+ * Returning 0 will cause the runtime PM core to conduct a regular
+ * autosuspend.
+ *
+ * This callback is optional; if it is not provided, regular autosuspend
+ * will be triggered.
+ *
+ * Note: The Linux kernel must have CONFIG_PM_RUNTIME enabled to use
+ * this feature.
+ *
+ * Return 0 if the GPU can be suspended, a positive value if it cannot be
+ * suspended by runtime PM, else an OS error code
+ */
+	int (*power_runtime_idle_callback)(struct kbase_device *kbdev);
+};
+
+/**
+ * kbase_cpuprops_get_default_clock_speed - default for CPU_SPEED_FUNC
+ * @clock_speed - see kbase_cpu_clk_speed_func for details on the parameters
+ *
+ * Returns 0 on success, negative error code otherwise.
+ *
+ * Default implementation of CPU_SPEED_FUNC. This function sets clock_speed
+ * to 100, so it will be an underestimate for any real system.
+ */
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
+
+/**
+ * kbase_cpu_clk_speed_func - Type of the function pointer for CPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current CPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ *
+ * This is mainly used to implement OpenCL's clGetDeviceInfo().
+ */
+typedef int (*kbase_cpu_clk_speed_func) (u32 *clock_speed);
+
+/**
+ * kbase_gpu_clk_speed_func - Type of the function pointer for GPU_SPEED_FUNC
+ * @param clock_speed - pointer to store the current GPU clock speed in MHz
+ *
+ * Returns 0 on success, otherwise negative error code.
+ * When an error is returned, the caller assumes the maximum GPU speed stored
+ * in gpu_freq_khz_max.
+ *
+ * If the system timer is not available then this function is required
+ * for the OpenCL queue profiling to return correct timing information.
+ *
+ */
+typedef int (*kbase_gpu_clk_speed_func) (u32 *clock_speed);
+
+#ifdef CONFIG_OF
+struct kbase_platform_config {
+};
+#else
+
+/*
+ * @brief Specifies start and end of I/O memory region.
+ */
+struct kbase_io_memory_region {
+	u64 start;
+	u64 end;
+};
+
+/*
+ * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
+ */
+struct kbase_io_resources {
+	u32 job_irq_number;
+	u32 mmu_irq_number;
+	u32 gpu_irq_number;
+	struct kbase_io_memory_region io_memory_region;
+};
+
+struct kbase_platform_config {
+	const struct kbase_io_resources *io_resources;
+};
+
+#endif /* CONFIG_OF */
+
+/**
+ * @brief Gets the pointer to platform config.
+ *
+ * @return Pointer to the platform config
+ */
+struct kbase_platform_config *kbase_get_platform_config(void);
+
+/**
+ * kbasep_platform_device_init - Platform specific call to initialize hardware
+ * @kbdev: kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can initialize any hardware and context state that
+ * is required for the GPU block to function.
+ *
+ * Return: 0 if no errors have been found in the config.
+ *         Negative error code otherwise.
+ */
+int kbasep_platform_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_platform_device_term - Platform specific call to terminate hardware
+ * @kbdev: Kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can destroy any platform specific context state and
+ * shut down any hardware functionality that is outside of the Power Management
+ * callbacks.
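+ *
+ * As an illustration only (the function names below are examples and are
+ * not part of the driver), the hooks invoked by this routine and by
+ * kbasep_platform_device_init() are supplied through a
+ * struct kbase_platform_funcs_conf, e.g.:
+ *
+ *	static int example_platform_init(struct kbase_device *kbdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void example_platform_term(struct kbase_device *kbdev)
+ *	{
+ *	}
+ *
+ *	struct kbase_platform_funcs_conf platform_funcs = {
+ *		.platform_init_func = example_platform_init,
+ *		.platform_term_func = example_platform_term,
+ *	};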
+ * + */ +void kbasep_platform_device_term(struct kbase_device *kbdev); + + +/** + * kbase_platform_early_init - Early initialisation of the platform code + * + * This function will be called when the module is loaded to perform any + * early initialisation required by the platform code. Such as reading + * platform specific device tree entries for the GPU. + * + * Return: 0 for success, any other fail causes module initialisation to fail + */ +int kbase_platform_early_init(void); + +#ifndef CONFIG_OF +/** + * kbase_platform_register - Register a platform device for the GPU + * + * This can be used to register a platform device on systems where device tree + * is not enabled and the platform initialisation code in the kernel doesn't + * create the GPU device. Where possible device tree should be used instead. + * + * Return: 0 for success, any other fail causes module initialisation to fail + */ +int kbase_platform_register(void); + +/** + * kbase_platform_unregister - Unregister a fake platform device + * + * Unregister the platform device created with kbase_platform_register() + */ +void kbase_platform_unregister(void); +#endif + + /** @} *//* end group kbase_config */ + /** @} *//* end group base_kbase_api */ + /** @} *//* end group base_api */ + +#endif /* _KBASE_CONFIG_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h new file mode 100644 index 00000000000000..aee6b0c0edaaeb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h @@ -0,0 +1,271 @@ +/* + * + * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @file mali_kbase_config_defaults.h + * + * Default values for configuration settings + * + */ + +#ifndef _KBASE_CONFIG_DEFAULTS_H_ +#define _KBASE_CONFIG_DEFAULTS_H_ + +/* Include mandatory definitions per platform */ +#include + +/** +* Boolean indicating whether the driver is configured to be secure at +* a potential loss of performance. +* +* This currently affects only r0p0-15dev0 HW and earlier. +* +* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and +* performance: +* +* - When this is set to true, the driver remains fully secure, +* but potentially loses performance compared with setting this to +* false. +* - When set to false, the driver is open to certain security +* attacks. +* +* From r0p0-00rel0 and onwards, there is no security loss by setting +* this to false, and no performance loss by setting it to +* true. +*/ +#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false + +enum { + /** + * Use unrestricted Address ID width on the AXI bus. + */ + KBASE_AID_32 = 0x0, + + /** + * Restrict GPU to a half of maximum Address ID count. + * This will reduce performance, but reduce bus load due to GPU. + */ + KBASE_AID_16 = 0x3, + + /** + * Restrict GPU to a quarter of maximum Address ID count. + * This will reduce performance, but reduce bus load due to GPU. + */ + KBASE_AID_8 = 0x2, + + /** + * Restrict GPU to an eighth of maximum Address ID count. 
+ * This will reduce performance, but reduce bus load due to GPU. + */ + KBASE_AID_4 = 0x1 +}; + +enum { + /** + * Use unrestricted Address ID width on the AXI bus. + * Restricting ID width will reduce performance & bus load due to GPU. + */ + KBASE_3BIT_AID_32 = 0x0, + + /* Restrict GPU to 7/8 of maximum Address ID count. */ + KBASE_3BIT_AID_28 = 0x1, + + /* Restrict GPU to 3/4 of maximum Address ID count. */ + KBASE_3BIT_AID_24 = 0x2, + + /* Restrict GPU to 5/8 of maximum Address ID count. */ + KBASE_3BIT_AID_20 = 0x3, + + /* Restrict GPU to 1/2 of maximum Address ID count. */ + KBASE_3BIT_AID_16 = 0x4, + + /* Restrict GPU to 3/8 of maximum Address ID count. */ + KBASE_3BIT_AID_12 = 0x5, + + /* Restrict GPU to 1/4 of maximum Address ID count. */ + KBASE_3BIT_AID_8 = 0x6, + + /* Restrict GPU to 1/8 of maximum Address ID count. */ + KBASE_3BIT_AID_4 = 0x7 +}; + +/** + * Default setting for read Address ID limiting on AXI bus. + * + * Attached value: u32 register value + * KBASE_AID_32 - use the full 32 IDs (5 ID bits) + * KBASE_AID_16 - use 16 IDs (4 ID bits) + * KBASE_AID_8 - use 8 IDs (3 ID bits) + * KBASE_AID_4 - use 4 IDs (2 ID bits) + * Default value: KBASE_AID_32 (no limit). Note hardware implementation + * may limit to a lower value. + */ +#define DEFAULT_ARID_LIMIT KBASE_AID_32 + +/** + * Default setting for write Address ID limiting on AXI. + * + * Attached value: u32 register value + * KBASE_AID_32 - use the full 32 IDs (5 ID bits) + * KBASE_AID_16 - use 16 IDs (4 ID bits) + * KBASE_AID_8 - use 8 IDs (3 ID bits) + * KBASE_AID_4 - use 4 IDs (2 ID bits) + * Default value: KBASE_AID_32 (no limit). Note hardware implementation + * may limit to a lower value. + */ +#define DEFAULT_AWID_LIMIT KBASE_AID_32 + +/** + * Default setting for read Address ID limiting on AXI bus. + * + * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation + * may limit to a lower value. + */ +#define DEFAULT_3BIT_ARID_LIMIT KBASE_3BIT_AID_32 + +/** + * Default setting for write Address ID limiting on AXI. + * + * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation + * may limit to a lower value. + */ +#define DEFAULT_3BIT_AWID_LIMIT KBASE_3BIT_AID_32 + +/** + * Default UMP device mapping. A UMP_DEVICE__SHIFT value which + * defines which UMP device this GPU should be mapped to. + */ +#define DEFAULT_UMP_GPU_DEVICE_SHIFT UMP_DEVICE_Z_SHIFT + +/* + * Default period for DVFS sampling + */ +#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */ + +/* + * Power Management poweroff tick granuality. This is in nanoseconds to + * allow HR timer support. + * + * On each scheduling tick, the power manager core may decide to: + * -# Power off one or more shader cores + * -# Power off the entire GPU + */ +#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */ + +/* + * Power Manager number of ticks before shader cores are powered off + */ +#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */ + +/* + * Power Manager number of ticks before GPU is powered off + */ +#define DEFAULT_PM_POWEROFF_TICK_GPU (2) /* 400-800us */ + +/* + * Default scheduling tick granuality + */ +#define DEFAULT_JS_SCHEDULING_PERIOD_NS (100000000u) /* 100ms */ + +/* + * Default minimum number of scheduling ticks before jobs are soft-stopped. + * + * This defines the time-slice for a job (which may be different from that of a + * context) + */ +#define DEFAULT_JS_SOFT_STOP_TICKS (1) /* 100ms-200ms */ + +/* + * Default minimum number of scheduling ticks before CL jobs are soft-stopped. 
+ */ +#define DEFAULT_JS_SOFT_STOP_TICKS_CL (1) /* 100ms-200ms */ + +/* + * Default minimum number of scheduling ticks before jobs are hard-stopped + */ +#define DEFAULT_JS_HARD_STOP_TICKS_SS (50) /* 5s */ +#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408 (300) /* 30s */ + +/* + * Default minimum number of scheduling ticks before CL jobs are hard-stopped. + */ +#define DEFAULT_JS_HARD_STOP_TICKS_CL (50) /* 5s */ + +/* + * Default minimum number of scheduling ticks before jobs are hard-stopped + * during dumping + */ +#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING (15000) /* 1500s */ + +/* + * Default timeout for some software jobs, after which the software event wait + * jobs will be cancelled. + */ +#define DEFAULT_JS_SOFT_JOB_TIMEOUT (3000) /* 3s */ + +/* + * Default minimum number of scheduling ticks before the GPU is reset to clear a + * "stuck" job + */ +#define DEFAULT_JS_RESET_TICKS_SS (55) /* 5.5s */ +#define DEFAULT_JS_RESET_TICKS_SS_8408 (450) /* 45s */ + +/* + * Default minimum number of scheduling ticks before the GPU is reset to clear a + * "stuck" CL job. + */ +#define DEFAULT_JS_RESET_TICKS_CL (55) /* 5.5s */ + +/* + * Default minimum number of scheduling ticks before the GPU is reset to clear a + * "stuck" job during dumping. + */ +#define DEFAULT_JS_RESET_TICKS_DUMPING (15020) /* 1502s */ + +/* + * Default number of milliseconds given for other jobs on the GPU to be + * soft-stopped when the GPU needs to be reset. + */ +#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */ + +/* + * Default timeslice that a context is scheduled in for, in nanoseconds. + * + * When a context has used up this amount of time across its jobs, it is + * scheduled out to let another run. + * + * @note the resolution is nanoseconds (ns) here, because that's the format + * often used by the OS. + */ +#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */ + +/* + * Perform GPU power down using only platform specific code, skipping DDK power + * management. + * + * If this is non-zero then kbase will avoid powering down shader cores, the + * tiler, and the L2 cache, instead just powering down the entire GPU through + * platform specific code. This may be required for certain platform + * integrations. + * + * Note that as this prevents kbase from powering down shader cores, this limits + * the available power policies to coarse_demand and always_on. + */ +#define PLATFORM_POWER_DOWN_ONLY (0) + +#endif /* _KBASE_CONFIG_DEFAULTS_H_ */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/midgard/mali_kbase_context.c new file mode 100644 index 00000000000000..589df768ce6075 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_context.c @@ -0,0 +1,359 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base kernel context APIs + */ + +#include +#include +#include +#include +#include + +/** + * kbase_create_context() - Create a kernel base context. + * @kbdev: Kbase device + * @is_compat: Force creation of a 32-bit context + * + * Allocate and init a kernel base context. 
+ * + * Return: new kbase context + */ +struct kbase_context * +kbase_create_context(struct kbase_device *kbdev, bool is_compat) +{ + struct kbase_context *kctx; + int err; + struct page *p; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + /* zero-inited as lot of code assume it's zero'ed out on create */ + kctx = vzalloc(sizeof(*kctx)); + + if (!kctx) + goto out; + + /* creating a context is considered a disjoint event */ + kbase_disjoint_event(kbdev); + + kctx->kbdev = kbdev; + kctx->as_nr = KBASEP_AS_NR_INVALID; + atomic_set(&kctx->refcount, 0); + if (is_compat) + kbase_ctx_flag_set(kctx, KCTX_COMPAT); +#ifdef CONFIG_MALI_TRACE_TIMELINE + kctx->timeline.owner_tgid = task_tgid_nr(current); +#endif + atomic_set(&kctx->setup_complete, 0); + atomic_set(&kctx->setup_in_progress, 0); + spin_lock_init(&kctx->mm_update_lock); + kctx->process_mm = NULL; + atomic_set(&kctx->nonmapped_pages, 0); + kctx->slots_pullable = 0; + kctx->tgid = current->tgid; + kctx->pid = current->pid; + + err = kbase_mem_pool_init(&kctx->mem_pool, + kbdev->mem_pool_max_size_default, + KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER, + kctx->kbdev, + &kbdev->mem_pool); + if (err) + goto free_kctx; + + err = kbase_mem_pool_init(&kctx->lp_mem_pool, + (kbdev->mem_pool_max_size_default >> 9), + KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER, + kctx->kbdev, + &kbdev->lp_mem_pool); + if (err) + goto free_mem_pool; + + err = kbase_mem_evictable_init(kctx); + if (err) + goto free_both_pools; + + atomic_set(&kctx->used_pages, 0); + + err = kbase_jd_init(kctx); + if (err) + goto deinit_evictable; + + err = kbasep_js_kctx_init(kctx); + if (err) + goto free_jd; /* safe to call kbasep_js_kctx_term in this case */ + + err = kbase_event_init(kctx); + if (err) + goto free_jd; + + atomic_set(&kctx->drain_pending, 0); + + mutex_init(&kctx->reg_lock); + + mutex_init(&kctx->mem_partials_lock); + INIT_LIST_HEAD(&kctx->mem_partials); + + INIT_LIST_HEAD(&kctx->waiting_soft_jobs); + spin_lock_init(&kctx->waiting_soft_jobs_lock); + err = kbase_dma_fence_init(kctx); + if (err) + goto free_event; + + err = kbase_mmu_init(kctx); + if (err) + goto term_dma_fence; + + do { + err = kbase_mem_pool_grow(&kctx->mem_pool, + MIDGARD_MMU_BOTTOMLEVEL); + if (err) + goto pgd_no_mem; + + mutex_lock(&kctx->mmu_lock); + kctx->pgd = kbase_mmu_alloc_pgd(kctx); + mutex_unlock(&kctx->mmu_lock); + } while (!kctx->pgd); + + p = kbase_mem_alloc_page(&kctx->mem_pool); + if (!p) + goto no_sink_page; + kctx->aliasing_sink_page = as_tagged(page_to_phys(p)); + + init_waitqueue_head(&kctx->event_queue); + + kctx->cookies = KBASE_COOKIE_MASK; + + /* Make sure page 0 is not used... 
*/ + err = kbase_region_tracker_init(kctx); + if (err) + goto no_region_tracker; + + err = kbase_sticky_resource_init(kctx); + if (err) + goto no_sticky; + + err = kbase_jit_init(kctx); + if (err) + goto no_jit; +#ifdef CONFIG_GPU_TRACEPOINTS + atomic_set(&kctx->jctx.work_id, 0); +#endif +#ifdef CONFIG_MALI_TRACE_TIMELINE + atomic_set(&kctx->timeline.jd_atoms_in_flight, 0); +#endif + + kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1; + + mutex_init(&kctx->vinstr_cli_lock); + + timer_setup(&kctx->soft_job_timeout, + kbasep_soft_job_timeout_worker, + 0); + + return kctx; + +no_jit: + kbase_gpu_vm_lock(kctx); + kbase_sticky_resource_term(kctx); + kbase_gpu_vm_unlock(kctx); +no_sticky: + kbase_region_tracker_term(kctx); +no_region_tracker: + kbase_mem_pool_free(&kctx->mem_pool, p, false); +no_sink_page: + /* VM lock needed for the call to kbase_mmu_free_pgd */ + kbase_gpu_vm_lock(kctx); + kbase_mmu_free_pgd(kctx); + kbase_gpu_vm_unlock(kctx); +pgd_no_mem: + kbase_mmu_term(kctx); +term_dma_fence: + kbase_dma_fence_term(kctx); +free_event: + kbase_event_cleanup(kctx); +free_jd: + /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */ + kbasep_js_kctx_term(kctx); + kbase_jd_exit(kctx); +deinit_evictable: + kbase_mem_evictable_deinit(kctx); +free_both_pools: + kbase_mem_pool_term(&kctx->lp_mem_pool); +free_mem_pool: + kbase_mem_pool_term(&kctx->mem_pool); +free_kctx: + vfree(kctx); +out: + return NULL; +} +KBASE_EXPORT_SYMBOL(kbase_create_context); + +static void kbase_reg_pending_dtor(struct kbase_va_region *reg) +{ + dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n"); + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); + kfree(reg); +} + +/** + * kbase_destroy_context - Destroy a kernel base context. + * @kctx: Context to destroy + * + * Calls kbase_destroy_os_context() to free OS specific structures. + * Will release all outstanding regions. + */ +void kbase_destroy_context(struct kbase_context *kctx) +{ + struct kbase_device *kbdev; + int pages; + unsigned long pending_regions_to_clean; + unsigned long flags; + struct page *p; + + KBASE_DEBUG_ASSERT(NULL != kctx); + + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(NULL != kbdev); + + KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u); + + /* Ensure the core is powered up for the destroy process */ + /* A suspend won't happen here, because we're in a syscall from a userspace + * thread. */ + kbase_pm_context_active(kbdev); + + kbase_jd_zap_context(kctx); + +#ifdef CONFIG_DEBUG_FS + /* Removing the rest of the debugfs entries here as we want to keep the + * atom debugfs interface alive until all atoms have completed. This + * is useful for debugging hung contexts. */ + debugfs_remove_recursive(kctx->kctx_dentry); +#endif + + kbase_event_cleanup(kctx); + + /* + * JIT must be terminated before the code below as it must be called + * without the region lock being held. + * The code above ensures no new JIT allocations can be made by + * by the time we get to this point of context tear down. 
+ */ + kbase_jit_term(kctx); + + kbase_gpu_vm_lock(kctx); + + kbase_sticky_resource_term(kctx); + + /* MMU is disabled as part of scheduling out the context */ + kbase_mmu_free_pgd(kctx); + + /* drop the aliasing sink page now that it can't be mapped anymore */ + p = phys_to_page(as_phys_addr_t(kctx->aliasing_sink_page)); + kbase_mem_pool_free(&kctx->mem_pool, p, false); + + /* free pending region setups */ + pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK; + while (pending_regions_to_clean) { + unsigned int cookie = __ffs(pending_regions_to_clean); + + BUG_ON(!kctx->pending_regions[cookie]); + + kbase_reg_pending_dtor(kctx->pending_regions[cookie]); + + kctx->pending_regions[cookie] = NULL; + pending_regions_to_clean &= ~(1UL << cookie); + } + + kbase_region_tracker_term(kctx); + kbase_gpu_vm_unlock(kctx); + + /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */ + kbasep_js_kctx_term(kctx); + + kbase_jd_exit(kctx); + + kbase_dma_fence_term(kctx); + + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags); + kbase_ctx_sched_remove_ctx(kctx); + spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + kbase_mmu_term(kctx); + + pages = atomic_read(&kctx->used_pages); + if (pages != 0) + dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages); + + kbase_mem_evictable_deinit(kctx); + kbase_mem_pool_term(&kctx->mem_pool); + kbase_mem_pool_term(&kctx->lp_mem_pool); + WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0); + + vfree(kctx); + + kbase_pm_context_idle(kbdev); +} +KBASE_EXPORT_SYMBOL(kbase_destroy_context); + +/** + * kbase_context_set_create_flags - Set creation flags on a context + * @kctx: Kbase context + * @flags: Flags to set + * + * Return: 0 on success + */ +int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags) +{ + int err = 0; + struct kbasep_js_kctx_info *js_kctx_info; + unsigned long irq_flags; + + KBASE_DEBUG_ASSERT(NULL != kctx); + + js_kctx_info = &kctx->jctx.sched_info; + + /* Validate flags */ + if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) { + err = -EINVAL; + goto out; + } + + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags); + + /* Translate the flags */ + if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0) + kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED); + + /* Latch the initial attributes into the Job Scheduler */ + kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx); + + spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + out: + return err; +} +KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags); diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.h b/drivers/gpu/arm/midgard/mali_kbase_context.h new file mode 100644 index 00000000000000..a3f5bb0ce0da38 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_context.h @@ -0,0 +1,90 @@ +/* + * + * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+
+#ifndef _KBASE_CONTEXT_H_
+#define _KBASE_CONTEXT_H_
+
+#include
+
+
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
+
+/**
+ * kbase_ctx_flag - Check if @flag is set on @kctx
+ * @kctx: Pointer to kbase context to check
+ * @flag: Flag to check
+ *
+ * Return: true if @flag is set on @kctx, false if not.
+ */
+static inline bool kbase_ctx_flag(struct kbase_context *kctx,
+				  enum kbase_context_flags flag)
+{
+	return atomic_read(&kctx->flags) & flag;
+}
+
+/**
+ * kbase_ctx_flag_clear - Clear @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to clear
+ *
+ * Clear the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements; check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
+					enum kbase_context_flags flag)
+{
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+	/*
+	 * Earlier kernel versions don't have atomic_andnot() or
+	 * atomic_and(). atomic_clear_mask() was only available on some
+	 * architectures and was removed on arm and arm64 in v3.13.
+	 *
+	 * Use a compare-exchange loop to clear the flag on pre-4.3 kernels,
+	 * which don't yet have atomic_andnot().
+	 */
+	int old, new;
+
+	do {
+		old = atomic_read(&kctx->flags);
+		new = old & ~flag;
+
+	} while (atomic_cmpxchg(&kctx->flags, old, new) != old);
+#else
+	atomic_andnot(flag, &kctx->flags);
+#endif
+}
+
+/**
+ * kbase_ctx_flag_set - Set @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to set
+ *
+ * Set the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements; check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
+				      enum kbase_context_flags flag)
+{
+	atomic_or(flag, &kctx->flags);
+}
+#endif /* _KBASE_CONTEXT_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
new file mode 100644
index 00000000000000..ebcbf0f3b9772b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
@@ -0,0 +1,4948 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ * + */ + + + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MALI_DEVFREQ +#include +#include +#ifdef CONFIG_DEVFREQ_THERMAL +#include +#endif /* CONFIG_DEVFREQ_THERMAL */ +#endif /* CONFIG_MALI_DEVFREQ */ +#ifdef CONFIG_MALI_NO_MALI +#include "mali_kbase_model_linux.h" +#endif /* CONFIG_MALI_NO_MALI */ +#include "mali_kbase_mem_profile_debugfs_buf_size.h" +#include "mali_kbase_debug_mem_view.h" +#include "mali_kbase_mem.h" +#include "mali_kbase_mem_pool_debugfs.h" +#if !MALI_CUSTOMER_RELEASE +#include "mali_kbase_regs_dump_debugfs.h" +#endif /* !MALI_CUSTOMER_RELEASE */ +#include "mali_kbase_regs_history_debugfs.h" +#include +#include +#include +#include +#include "mali_kbase_ioctl.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* is_compat_task */ +#include +#include +#include +#include +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#include +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ +#include +#include + +#include + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +#include +#else +#include +#endif + +#include + +#include + +/* GPU IRQ Tags */ +#define JOB_IRQ_TAG 0 +#define MMU_IRQ_TAG 1 +#define GPU_IRQ_TAG 2 + +static int kbase_dev_nr; + +static DEFINE_MUTEX(kbase_dev_list_lock); +static LIST_HEAD(kbase_dev_list); + +#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)" + +static int kbase_api_handshake(struct kbase_context *kctx, + struct kbase_ioctl_version_check *version) +{ + switch (version->major) { + case BASE_UK_VERSION_MAJOR: + /* set minor to be the lowest common */ + version->minor = min_t(int, BASE_UK_VERSION_MINOR, + (int)version->minor); + break; + default: + /* We return our actual version regardless if it + * matches the version returned by userspace - + * userspace can bail if it can't handle this + * version */ + version->major = BASE_UK_VERSION_MAJOR; + version->minor = BASE_UK_VERSION_MINOR; + break; + } + + /* save the proposed version number for later use */ + kctx->api_version = KBASE_API_VERSION(version->major, version->minor); + + return 0; +} + +/** + * enum mali_error - Mali error codes shared with userspace + * + * This is subset of those common Mali errors that can be returned to userspace. + * Values of matching user and kernel space enumerators MUST be the same. + * MALI_ERROR_NONE is guaranteed to be 0. + * + * @MALI_ERROR_NONE: Success + * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver + * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure + * @MALI_ERROR_FUNCTION_FAILED: Generic error code + */ +enum mali_error { + MALI_ERROR_NONE = 0, + MALI_ERROR_OUT_OF_GPU_MEMORY, + MALI_ERROR_OUT_OF_MEMORY, + MALI_ERROR_FUNCTION_FAILED, +}; + +enum { + inited_mem = (1u << 0), + inited_js = (1u << 1), + /* Bit number 2 was earlier assigned to the runtime-pm initialization + * stage (which has been merged with the backend_early stage). 
+ */ +#ifdef CONFIG_MALI_DEVFREQ + inited_devfreq = (1u << 3), +#endif /* CONFIG_MALI_DEVFREQ */ + inited_tlstream = (1u << 4), + inited_backend_early = (1u << 5), + inited_backend_late = (1u << 6), + inited_device = (1u << 7), + inited_vinstr = (1u << 8), + + inited_job_fault = (1u << 10), + inited_sysfs_group = (1u << 11), + inited_misc_register = (1u << 12), + inited_get_device = (1u << 13), + inited_dev_list = (1u << 14), + inited_debugfs = (1u << 15), + inited_gpu_device = (1u << 16), + inited_registers_map = (1u << 17), + inited_io_history = (1u << 18), + inited_power_control = (1u << 19), + inited_buslogger = (1u << 20), + inited_protected = (1u << 21), + inited_ctx_sched = (1u << 22) +}; + + +#ifdef CONFIG_MALI_DEBUG +#define INACTIVE_WAIT_MS (5000) + +void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive) +{ + kbdev->driver_inactive = inactive; + wake_up(&kbdev->driver_inactive_wait); + + /* Wait for any running IOCTLs to complete */ + if (inactive) + msleep(INACTIVE_WAIT_MS); +} +KBASE_EXPORT_TEST_API(kbase_set_driver_inactive); +#endif /* CONFIG_MALI_DEBUG */ + +/** + * kbase_legacy_dispatch - UKK dispatch function + * + * This is the dispatch function for the legacy UKK ioctl interface. No new + * ioctls should be added to this function, see kbase_ioctl instead. + * + * @kctx: The kernel context structure + * @args: Pointer to the data structure passed from/to user space + * @args_size: Size of the data structure + */ +static int kbase_legacy_dispatch(struct kbase_context *kctx, + void * const args, u32 args_size) +{ + struct kbase_device *kbdev; + union uk_header *ukh = args; + u32 id; + int ret = 0; + + KBASE_DEBUG_ASSERT(ukh != NULL); + + kbdev = kctx->kbdev; + id = ukh->id; + ukh->ret = MALI_ERROR_NONE; /* Be optimistic */ + +#ifdef CONFIG_MALI_DEBUG + wait_event(kbdev->driver_inactive_wait, + kbdev->driver_inactive == false); +#endif /* CONFIG_MALI_DEBUG */ + + if (UKP_FUNC_ID_CHECK_VERSION == id) { + struct uku_version_check_args *version_check; + struct kbase_ioctl_version_check version; + + if (args_size != sizeof(struct uku_version_check_args)) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + return 0; + } + version_check = (struct uku_version_check_args *)args; + version.minor = min_t(int, 6, (int)version_check->minor); + version.major = version_check->major; + + kctx->api_version = KBASE_API_VERSION(version.major, version.minor); + + version_check->minor = version.minor; + version_check->major = version.major; + ukh->ret = MALI_ERROR_NONE; + return 0; + } + + /* block calls until version handshake */ + if (kctx->api_version == 0) + return -EINVAL; + + if (!atomic_read(&kctx->setup_complete)) { + struct kbase_uk_set_flags *kbase_set_flags; + + /* setup pending, try to signal that we'll do the setup, + * if setup was already in progress, err this call + */ + if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0) + return -EINVAL; + + /* if unexpected call, will stay stuck in setup mode + * (is it the only call we accept?) 
+ */ + if (id != KBASE_FUNC_SET_FLAGS) + return -EINVAL; + + kbase_set_flags = (struct kbase_uk_set_flags *)args; + + /* if not matching the expected call, stay in setup mode */ + if (sizeof(*kbase_set_flags) != args_size) + goto bad_size; + + /* if bad flags, will stay stuck in setup mode */ + if (kbase_context_set_create_flags(kctx, + kbase_set_flags->create_flags) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + + atomic_set(&kctx->setup_complete, 1); + return 0; + } + + /* setup complete, perform normal operation */ + switch (id) { + case KBASE_FUNC_MEM_JIT_INIT: + { + struct kbase_uk_mem_jit_init *jit_init = args; + + if (sizeof(*jit_init) != args_size) + goto bad_size; + + if (kbase_region_tracker_init_jit(kctx, + jit_init->va_pages)) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + case KBASE_FUNC_MEM_ALLOC: + { + struct kbase_uk_mem_alloc *mem = args; + struct kbase_va_region *reg; + + if (sizeof(*mem) != args_size) + goto bad_size; + +#if defined(CONFIG_64BIT) + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* force SAME_VA if a 64-bit client */ + mem->flags |= BASE_MEM_SAME_VA; + } +#endif + + reg = kbase_mem_alloc(kctx, mem->va_pages, + mem->commit_pages, mem->extent, + &mem->flags, &mem->gpu_va); + mem->va_alignment = 0; + + if (!reg) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + case KBASE_FUNC_MEM_IMPORT: { + struct kbase_uk_mem_import *mem_import = args; + void __user *phandle; + + if (sizeof(*mem_import) != args_size) + goto bad_size; +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + phandle = compat_ptr(mem_import->phandle); + else +#endif + phandle = u64_to_user_ptr(mem_import->phandle); + + if (mem_import->type == BASE_MEM_IMPORT_TYPE_INVALID) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + + if (kbase_mem_import(kctx, + (enum base_mem_import_type) + mem_import->type, + phandle, + 0, + &mem_import->gpu_va, + &mem_import->va_pages, + &mem_import->flags)) { + mem_import->type = BASE_MEM_IMPORT_TYPE_INVALID; + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + } + break; + } + case KBASE_FUNC_MEM_ALIAS: { + struct kbase_uk_mem_alias *alias = args; + struct base_mem_aliasing_info __user *user_ai; + struct base_mem_aliasing_info *ai; + + if (sizeof(*alias) != args_size) + goto bad_size; + + if (alias->nents > 2048) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + if (!alias->nents) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + user_ai = compat_ptr(alias->ai); + else +#endif + user_ai = u64_to_user_ptr(alias->ai); + + ai = vmalloc(sizeof(*ai) * alias->nents); + + if (!ai) { + ukh->ret = MALI_ERROR_OUT_OF_MEMORY; + break; + } + + if (copy_from_user(ai, user_ai, + sizeof(*ai) * alias->nents)) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + goto copy_failed; + } + + alias->gpu_va = kbase_mem_alias(kctx, &alias->flags, + alias->stride, + alias->nents, ai, + &alias->va_pages); + if (!alias->gpu_va) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + goto no_alias; + } +no_alias: +copy_failed: + vfree(ai); + break; + } + case KBASE_FUNC_MEM_COMMIT: + { + struct kbase_uk_mem_commit *commit = args; + int ret; + + if (sizeof(*commit) != args_size) + goto bad_size; + + ret = kbase_mem_commit(kctx, commit->gpu_addr, + commit->pages); + + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + commit->result_subcode = + BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS; + + if (ret == 0) { + ukh->ret = MALI_ERROR_NONE; + commit->result_subcode = + BASE_BACKING_THRESHOLD_OK; + } else if (ret == -ENOMEM) 
{ + commit->result_subcode = + BASE_BACKING_THRESHOLD_ERROR_OOM; + } + + break; + } + + case KBASE_FUNC_MEM_QUERY: + { + struct kbase_uk_mem_query *query = args; + + if (sizeof(*query) != args_size) + goto bad_size; + + if (kbase_mem_query(kctx, query->gpu_addr, + query->query, &query->value) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + ukh->ret = MALI_ERROR_NONE; + break; + } + break; + + case KBASE_FUNC_MEM_FLAGS_CHANGE: + { + struct kbase_uk_mem_flags_change *fc = args; + + if (sizeof(*fc) != args_size) + goto bad_size; + + if (kbase_mem_flags_change(kctx, fc->gpu_va, + fc->flags, fc->mask) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + + break; + } + case KBASE_FUNC_MEM_FREE: + { + struct kbase_uk_mem_free *mem = args; + + if (sizeof(*mem) != args_size) + goto bad_size; + + if (kbase_mem_free(kctx, mem->gpu_addr) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + + case KBASE_FUNC_JOB_SUBMIT: + { + struct kbase_uk_job_submit *job = args; + char __user *user_buf; + + if (sizeof(*job) != args_size) + goto bad_size; + +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + user_buf = compat_ptr(job->addr); + else +#endif + user_buf = u64_to_user_ptr(job->addr); + + if (kbase_jd_submit(kctx, user_buf, + job->nr_atoms, + job->stride, + false) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + + case KBASE_FUNC_SYNC: + { + struct kbase_uk_sync_now *sn = args; + + if (sizeof(*sn) != args_size) + goto bad_size; + + if (kbase_sync_now(kctx, &sn->sset.basep_sset) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + + case KBASE_FUNC_DISJOINT_QUERY: + { + struct kbase_uk_disjoint_query *dquery = args; + + if (sizeof(*dquery) != args_size) + goto bad_size; + + /* Get the disjointness counter value. */ + dquery->counter = kbase_disjoint_event_get(kctx->kbdev); + break; + } + + case KBASE_FUNC_POST_TERM: + { + kbase_event_close(kctx); + break; + } + + case KBASE_FUNC_HWCNT_SETUP: + { + struct kbase_uk_hwcnt_setup *setup = args; + + if (sizeof(*setup) != args_size) + goto bad_size; + + mutex_lock(&kctx->vinstr_cli_lock); + if (kbase_vinstr_legacy_hwc_setup(kbdev->vinstr_ctx, + &kctx->vinstr_cli, setup) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + mutex_unlock(&kctx->vinstr_cli_lock); + break; + } + + case KBASE_FUNC_HWCNT_DUMP: + { + /* args ignored */ + mutex_lock(&kctx->vinstr_cli_lock); + if (kbase_vinstr_hwc_dump(kctx->vinstr_cli, + BASE_HWCNT_READER_EVENT_MANUAL) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + mutex_unlock(&kctx->vinstr_cli_lock); + break; + } + + case KBASE_FUNC_HWCNT_CLEAR: + { + /* args ignored */ + mutex_lock(&kctx->vinstr_cli_lock); + if (kbase_vinstr_hwc_clear(kctx->vinstr_cli) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + mutex_unlock(&kctx->vinstr_cli_lock); + break; + } + + case KBASE_FUNC_HWCNT_READER_SETUP: + { + struct kbase_uk_hwcnt_reader_setup *setup = args; + + if (sizeof(*setup) != args_size) + goto bad_size; + + mutex_lock(&kctx->vinstr_cli_lock); + if (kbase_vinstr_hwcnt_reader_setup(kbdev->vinstr_ctx, + setup) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + mutex_unlock(&kctx->vinstr_cli_lock); + break; + } + + case KBASE_FUNC_GPU_PROPS_REG_DUMP: + { + struct kbase_uk_gpuprops *setup = args; + + if (sizeof(*setup) != args_size) + goto bad_size; + + if (kbase_gpuprops_uk_get_props(kctx, setup) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + case KBASE_FUNC_FIND_CPU_OFFSET: + { + struct kbase_uk_find_cpu_offset *find = args; + + if (sizeof(*find) != args_size) + goto bad_size; + + if 
(find->gpu_addr & ~PAGE_MASK) { + dev_warn(kbdev->dev, "kbase_legacy_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid"); + goto out_bad; + } + + if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + } else { + int err; + + err = kbasep_find_enclosing_cpu_mapping_offset( + kctx, + find->cpu_addr, + find->size, + &find->offset); + + if (err) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + } + break; + } + case KBASE_FUNC_GET_VERSION: + { + struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args; + + if (sizeof(*get_version) != args_size) + goto bad_size; + + /* version buffer size check is made in compile time assert */ + memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING)); + get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING); + break; + } + + case KBASE_FUNC_STREAM_CREATE: + { +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args; + + if (sizeof(*screate) != args_size) + goto bad_size; + + if (strnlen(screate->name, sizeof(screate->name)) >= sizeof(screate->name)) { + /* not NULL terminated */ + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + break; + } + + if (kbase_sync_fence_stream_create(screate->name, + &screate->fd) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + ukh->ret = MALI_ERROR_NONE; +#else /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + ukh->ret = MALI_ERROR_FUNCTION_FAILED; +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + break; + } + case KBASE_FUNC_FENCE_VALIDATE: + { +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args; + + if (sizeof(*fence_validate) != args_size) + goto bad_size; + + if (kbase_sync_fence_validate(fence_validate->fd) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + ukh->ret = MALI_ERROR_NONE; +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + break; + } + + case KBASE_FUNC_SET_TEST_DATA: + { +#if MALI_UNIT_TEST + struct kbase_uk_set_test_data *set_data = args; + + shared_kernel_test_data = set_data->test_data; + shared_kernel_test_data.kctx = (uintptr_t)kctx; + shared_kernel_test_data.mm = (uintptr_t)current->mm; + ukh->ret = MALI_ERROR_NONE; +#endif /* MALI_UNIT_TEST */ + break; + } + + case KBASE_FUNC_INJECT_ERROR: + { +#ifdef CONFIG_MALI_ERROR_INJECT + unsigned long flags; + struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params; + + /*mutex lock */ + spin_lock_irqsave(&kbdev->reg_op_lock, flags); + if (job_atom_inject_error(¶ms) != 0) + ukh->ret = MALI_ERROR_OUT_OF_MEMORY; + else + ukh->ret = MALI_ERROR_NONE; + spin_unlock_irqrestore(&kbdev->reg_op_lock, flags); + /*mutex unlock */ +#endif /* CONFIG_MALI_ERROR_INJECT */ + break; + } + + case KBASE_FUNC_MODEL_CONTROL: + { +#ifdef CONFIG_MALI_NO_MALI + unsigned long flags; + struct kbase_model_control_params params = + ((struct kbase_uk_model_control_params *)args)->params; + + /*mutex lock */ + spin_lock_irqsave(&kbdev->reg_op_lock, flags); + if (gpu_model_control(kbdev->model, ¶ms) != 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + ukh->ret = MALI_ERROR_NONE; + spin_unlock_irqrestore(&kbdev->reg_op_lock, flags); + /*mutex unlock */ +#endif /* CONFIG_MALI_NO_MALI */ + break; + } + + case KBASE_FUNC_GET_PROFILING_CONTROLS: + { + struct kbase_uk_profiling_controls *controls = + (struct kbase_uk_profiling_controls 
*)args; + u32 i; + + if (sizeof(*controls) != args_size) + goto bad_size; + + for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) + controls->profiling_controls[i] = + kbdev->kbase_profiling_controls[i]; + + break; + } + + /* used only for testing purposes; these controls are to be set by gator through gator API */ + case KBASE_FUNC_SET_PROFILING_CONTROLS: + { + struct kbase_uk_profiling_controls *controls = + (struct kbase_uk_profiling_controls *)args; + u32 i; + + if (sizeof(*controls) != args_size) + goto bad_size; + + for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) + _mali_profiling_control(i, controls->profiling_controls[i]); + + break; + } + + case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD: + { + struct kbase_uk_debugfs_mem_profile_add *add_data = + (struct kbase_uk_debugfs_mem_profile_add *)args; + char *buf; + char __user *user_buf; + + if (sizeof(*add_data) != args_size) + goto bad_size; + + if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) { + dev_err(kbdev->dev, "buffer too big\n"); + goto out_bad; + } + +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + user_buf = compat_ptr(add_data->buf); + else +#endif + user_buf = u64_to_user_ptr(add_data->buf); + + buf = kmalloc(add_data->len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + goto out_bad; + + if (0 != copy_from_user(buf, user_buf, add_data->len)) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + kfree(buf); + goto out_bad; + } + + if (kbasep_mem_profile_debugfs_insert(kctx, buf, + add_data->len)) { + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + goto out_bad; + } + + break; + } + +#ifdef CONFIG_MALI_NO_MALI + case KBASE_FUNC_SET_PRFCNT_VALUES: + { + + struct kbase_uk_prfcnt_values *params = + ((struct kbase_uk_prfcnt_values *)args); + gpu_model_set_dummy_prfcnt_sample(params->data, + params->size); + + break; + } +#endif /* CONFIG_MALI_NO_MALI */ +#ifdef BASE_LEGACY_UK10_4_SUPPORT + case KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4: + { + struct kbase_uk_tlstream_acquire_v10_4 *tlstream_acquire + = args; + int ret; + + if (sizeof(*tlstream_acquire) != args_size) + goto bad_size; + + ret = kbase_tlstream_acquire( + kctx, 0); + if (ret < 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + tlstream_acquire->fd = ret; + break; + } +#endif /* BASE_LEGACY_UK10_4_SUPPORT */ + case KBASE_FUNC_TLSTREAM_ACQUIRE: + { + struct kbase_uk_tlstream_acquire *tlstream_acquire = + args; + int ret; + + if (sizeof(*tlstream_acquire) != args_size) + goto bad_size; + + if (tlstream_acquire->flags & ~BASE_TLSTREAM_FLAGS_MASK) + goto out_bad; + + ret = kbase_tlstream_acquire( + kctx, tlstream_acquire->flags); + if (ret < 0) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + else + tlstream_acquire->fd = ret; + break; + } + case KBASE_FUNC_TLSTREAM_FLUSH: + { + struct kbase_uk_tlstream_flush *tlstream_flush = + args; + + if (sizeof(*tlstream_flush) != args_size) + goto bad_size; + + kbase_tlstream_flush_streams(); + break; + } +#if MALI_UNIT_TEST + case KBASE_FUNC_TLSTREAM_TEST: + { + struct kbase_uk_tlstream_test *tlstream_test = args; + + if (sizeof(*tlstream_test) != args_size) + goto bad_size; + + kbase_tlstream_test( + tlstream_test->tpw_count, + tlstream_test->msg_delay, + tlstream_test->msg_count, + tlstream_test->aux_msg); + break; + } + case KBASE_FUNC_TLSTREAM_STATS: + { + struct kbase_uk_tlstream_stats *tlstream_stats = args; + + if (sizeof(*tlstream_stats) != args_size) + goto bad_size; + + kbase_tlstream_stats( + &tlstream_stats->bytes_collected, + &tlstream_stats->bytes_generated); + break; + } +#endif /* MALI_UNIT_TEST */ + + case 
KBASE_FUNC_GET_CONTEXT_ID: + { + struct kbase_uk_context_id *info = args; + + info->id = kctx->id; + break; + } + + case KBASE_FUNC_SOFT_EVENT_UPDATE: + { + struct kbase_uk_soft_event_update *update = args; + + if (sizeof(*update) != args_size) + goto bad_size; + + if (((update->new_status != BASE_JD_SOFT_EVENT_SET) && + (update->new_status != BASE_JD_SOFT_EVENT_RESET)) || + (update->flags != 0)) + goto out_bad; + + if (kbase_soft_event_update(kctx, update->evt, + update->new_status)) + ukh->ret = MALI_ERROR_FUNCTION_FAILED; + + break; + } + + default: + dev_err(kbdev->dev, "unknown ioctl %u\n", id); + goto out_bad; + } + + return ret; + + bad_size: + dev_err(kbdev->dev, "Wrong syscall size (%d) for %08x\n", args_size, id); + out_bad: + return -EINVAL; +} + +static struct kbase_device *to_kbase_device(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +static int assign_irqs(struct platform_device *pdev) +{ + struct kbase_device *kbdev = to_kbase_device(&pdev->dev); + int i; + + if (!kbdev) + return -ENODEV; + + /* 3 IRQ resources */ + for (i = 0; i < 3; i++) { + struct resource *irq_res; + int irqtag; + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!irq_res) { + dev_err(kbdev->dev, "No IRQ resource at index %d\n", i); + return -ENOENT; + } + +#ifdef CONFIG_OF + if (!strncmp(irq_res->name, "job", 4)) { + irqtag = JOB_IRQ_TAG; + } else if (!strncmp(irq_res->name, "mmu", 4)) { + irqtag = MMU_IRQ_TAG; + } else if (!strncmp(irq_res->name, "gpu", 4)) { + irqtag = GPU_IRQ_TAG; + } else { + dev_err(&pdev->dev, "Invalid irq res name: '%s'\n", + irq_res->name); + return -EINVAL; + } +#else + irqtag = i; +#endif /* CONFIG_OF */ + kbdev->irqs[irqtag].irq = irq_res->start; + kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK; + } + + return 0; +} + +/* + * API to acquire device list mutex and + * return pointer to the device list head + */ +const struct list_head *kbase_dev_list_get(void) +{ + mutex_lock(&kbase_dev_list_lock); + return &kbase_dev_list; +} +KBASE_EXPORT_TEST_API(kbase_dev_list_get); + +/* API to release the device list mutex */ +void kbase_dev_list_put(const struct list_head *dev_list) +{ + mutex_unlock(&kbase_dev_list_lock); +} +KBASE_EXPORT_TEST_API(kbase_dev_list_put); + +/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */ +struct kbase_device *kbase_find_device(int minor) +{ + struct kbase_device *kbdev = NULL; + struct list_head *entry; + const struct list_head *dev_list = kbase_dev_list_get(); + + list_for_each(entry, dev_list) { + struct kbase_device *tmp; + + tmp = list_entry(entry, struct kbase_device, entry); + if (tmp->mdev.minor == minor || minor == -1) { + kbdev = tmp; + get_device(kbdev->dev); + break; + } + } + kbase_dev_list_put(dev_list); + + return kbdev; +} +EXPORT_SYMBOL(kbase_find_device); + +void kbase_release_device(struct kbase_device *kbdev) +{ + put_device(kbdev->dev); +} +EXPORT_SYMBOL(kbase_release_device); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && \ + !(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 28) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* + * Older versions, before v4.6, of the kernel doesn't have + * kstrtobool_from_user(), except longterm 4.4.y which had it added in 4.4.28 + */ +static int kstrtobool_from_user(const char __user *s, size_t count, bool *res) +{ + char buf[32]; + + count = min(sizeof(buf), count); + + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + + return strtobool(buf, res); +} 
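+
+/*
+ * Illustrative sketch only (not part of the driver): with the fallback above,
+ * callers such as the debugfs handlers below can use the helper the same way
+ * on all supported kernels, e.g.:
+ *
+ *	bool value;
+ *	int err = kstrtobool_from_user(ubuf, size, &value);
+ *
+ *	if (err)
+ *		return err;
+ */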
+#endif + +static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off) +{ + struct kbase_context *kctx = f->private_data; + int err; + bool value; + + err = kstrtobool_from_user(ubuf, size, &value); + if (err) + return err; + + if (value) + kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE); + else + kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE); + + return size; +} + +static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off) +{ + struct kbase_context *kctx = f->private_data; + char buf[32]; + int count; + bool value; + + value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE); + + count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N"); + + return simple_read_from_buffer(ubuf, size, off, buf, count); +} + +static const struct file_operations kbase_infinite_cache_fops = { + .open = simple_open, + .write = write_ctx_infinite_cache, + .read = read_ctx_infinite_cache, +}; + +static int kbase_open(struct inode *inode, struct file *filp) +{ + struct kbase_device *kbdev = NULL; + struct kbase_context *kctx; + int ret = 0; +#ifdef CONFIG_DEBUG_FS + char kctx_name[64]; +#endif + + kbdev = kbase_find_device(iminor(inode)); + + if (!kbdev) + return -ENODEV; + + kctx = kbase_create_context(kbdev, is_compat_task()); + if (!kctx) { + ret = -ENOMEM; + goto out; + } + + init_waitqueue_head(&kctx->event_queue); + filp->private_data = kctx; + filp->f_mode |= FMODE_UNSIGNED_OFFSET; + kctx->filp = filp; + + if (kbdev->infinite_cache_active_default) + kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE); + +#ifdef CONFIG_DEBUG_FS + snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id); + + kctx->kctx_dentry = debugfs_create_dir(kctx_name, + kbdev->debugfs_ctx_directory); + + if (IS_ERR_OR_NULL(kctx->kctx_dentry)) { + ret = -ENOMEM; + goto out; + } + + debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry, + kctx, &kbase_infinite_cache_fops); + + mutex_init(&kctx->mem_profile_lock); + + kbasep_jd_debugfs_ctx_init(kctx); + kbase_debug_mem_view_init(filp); + + kbase_debug_job_fault_context_init(kctx); + + kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool, &kctx->lp_mem_pool); + + kbase_jit_debugfs_init(kctx); +#endif /* CONFIG_DEBUG_FS */ + + dev_dbg(kbdev->dev, "created base context\n"); + + { + struct kbasep_kctx_list_element *element; + + element = kzalloc(sizeof(*element), GFP_KERNEL); + if (element) { + mutex_lock(&kbdev->kctx_list_lock); + element->kctx = kctx; + list_add(&element->link, &kbdev->kctx_list); + KBASE_TLSTREAM_TL_NEW_CTX( + element->kctx, + element->kctx->id, + (u32)(element->kctx->tgid)); + mutex_unlock(&kbdev->kctx_list_lock); + } else { + /* we don't treat this as a fail - just warn about it */ + dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n"); + } + } + return 0; + + out: + kbase_release_device(kbdev); + return ret; +} + +static int kbase_release(struct inode *inode, struct file *filp) +{ + struct kbase_context *kctx = filp->private_data; + struct kbase_device *kbdev = kctx->kbdev; + struct kbasep_kctx_list_element *element, *tmp; + bool found_element = false; + + KBASE_TLSTREAM_TL_DEL_CTX(kctx); + +#ifdef CONFIG_DEBUG_FS + kbasep_mem_profile_debugfs_remove(kctx); + kbase_debug_job_fault_context_term(kctx); +#endif + + mutex_lock(&kbdev->kctx_list_lock); + list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) { + if (element->kctx == kctx) { + list_del(&element->link); + kfree(element); + found_element = true; + } + } + mutex_unlock(&kbdev->kctx_list_lock); + if 
(!found_element) + dev_warn(kbdev->dev, "kctx not in kctx_list\n"); + + filp->private_data = NULL; + + mutex_lock(&kctx->vinstr_cli_lock); + /* If this client was performing hwcnt dumping and did not explicitly + * detach itself, remove it from the vinstr core now */ + if (kctx->vinstr_cli) { + struct kbase_uk_hwcnt_setup setup; + + setup.dump_buffer = 0llu; + kbase_vinstr_legacy_hwc_setup( + kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup); + } + mutex_unlock(&kctx->vinstr_cli_lock); + + kbase_destroy_context(kctx); + + dev_dbg(kbdev->dev, "deleted base context\n"); + kbase_release_device(kbdev); + return 0; +} + +#define CALL_MAX_SIZE 536 + +static long kbase_legacy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull }; /* alignment fixup */ + u32 size = _IOC_SIZE(cmd); + struct kbase_context *kctx = filp->private_data; + + if (size > CALL_MAX_SIZE) + return -ENOTTY; + + if (0 != copy_from_user(&msg, (void __user *)arg, size)) { + dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n"); + return -EFAULT; + } + + if (kbase_legacy_dispatch(kctx, &msg, size) != 0) + return -EFAULT; + + if (0 != copy_to_user((void __user *)arg, &msg, size)) { + dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n"); + return -EFAULT; + } + return 0; +} + +static int kbase_api_set_flags(struct kbase_context *kctx, + struct kbase_ioctl_set_flags *flags) +{ + int err; + + /* setup pending, try to signal that we'll do the setup, + * if setup was already in progress, err this call + */ + if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0) + return -EINVAL; + + err = kbase_context_set_create_flags(kctx, flags->create_flags); + /* if bad flags, will stay stuck in setup mode */ + if (err) + return err; + + atomic_set(&kctx->setup_complete, 1); + return 0; +} + +static int kbase_api_job_submit(struct kbase_context *kctx, + struct kbase_ioctl_job_submit *submit) +{ + return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr), + submit->nr_atoms, + submit->stride, false); +} + +static int kbase_api_get_gpuprops(struct kbase_context *kctx, + struct kbase_ioctl_get_gpuprops *get_props) +{ + struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props; + int err; + + if (get_props->flags != 0) { + dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops"); + return -EINVAL; + } + + if (get_props->size == 0) + return kprops->prop_buffer_size; + if (get_props->size < kprops->prop_buffer_size) + return -EINVAL; + + err = copy_to_user(u64_to_user_ptr(get_props->buffer), + kprops->prop_buffer, + kprops->prop_buffer_size); + if (err) + return -EFAULT; + return kprops->prop_buffer_size; +} + +static int kbase_api_post_term(struct kbase_context *kctx) +{ + kbase_event_close(kctx); + return 0; +} + +static int kbase_api_mem_alloc(struct kbase_context *kctx, + union kbase_ioctl_mem_alloc *alloc) +{ + struct kbase_va_region *reg; + u64 flags = alloc->in.flags; + u64 gpu_va; + +#if defined(CONFIG_64BIT) + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* force SAME_VA if a 64-bit client */ + flags |= BASE_MEM_SAME_VA; + } +#endif + + reg = kbase_mem_alloc(kctx, alloc->in.va_pages, + alloc->in.commit_pages, + alloc->in.extent, + &flags, &gpu_va); + + if (!reg) + return -ENOMEM; + + alloc->out.flags = flags; + alloc->out.gpu_va = gpu_va; + + return 0; +} + +static int kbase_api_mem_query(struct kbase_context *kctx, + union kbase_ioctl_mem_query *query) +{ + return kbase_mem_query(kctx, query->in.gpu_addr, + 
query->in.query, &query->out.value); +} + +static int kbase_api_mem_free(struct kbase_context *kctx, + struct kbase_ioctl_mem_free *free) +{ + return kbase_mem_free(kctx, free->gpu_addr); +} + +static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx, + struct kbase_ioctl_hwcnt_reader_setup *setup) +{ + int ret; + struct kbase_uk_hwcnt_reader_setup args = { + .buffer_count = setup->buffer_count, + .jm_bm = setup->jm_bm, + .shader_bm = setup->shader_bm, + .tiler_bm = setup->tiler_bm, + .mmu_l2_bm = setup->mmu_l2_bm + }; + + mutex_lock(&kctx->vinstr_cli_lock); + ret = kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, &args); + mutex_unlock(&kctx->vinstr_cli_lock); + + if (ret) + return ret; + return args.fd; +} + +static int kbase_api_hwcnt_enable(struct kbase_context *kctx, + struct kbase_ioctl_hwcnt_enable *enable) +{ + int ret; + struct kbase_uk_hwcnt_setup args = { + .dump_buffer = enable->dump_buffer, + .jm_bm = enable->jm_bm, + .shader_bm = enable->shader_bm, + .tiler_bm = enable->tiler_bm, + .mmu_l2_bm = enable->mmu_l2_bm + }; + + mutex_lock(&kctx->vinstr_cli_lock); + ret = kbase_vinstr_legacy_hwc_setup(kctx->kbdev->vinstr_ctx, + &kctx->vinstr_cli, &args); + mutex_unlock(&kctx->vinstr_cli_lock); + + return ret; +} + +static int kbase_api_hwcnt_dump(struct kbase_context *kctx) +{ + int ret; + + mutex_lock(&kctx->vinstr_cli_lock); + ret = kbase_vinstr_hwc_dump(kctx->vinstr_cli, + BASE_HWCNT_READER_EVENT_MANUAL); + mutex_unlock(&kctx->vinstr_cli_lock); + + return ret; +} + +static int kbase_api_hwcnt_clear(struct kbase_context *kctx) +{ + int ret; + + mutex_lock(&kctx->vinstr_cli_lock); + ret = kbase_vinstr_hwc_clear(kctx->vinstr_cli); + mutex_unlock(&kctx->vinstr_cli_lock); + + return ret; +} + +static int kbase_api_disjoint_query(struct kbase_context *kctx, + struct kbase_ioctl_disjoint_query *query) +{ + query->counter = kbase_disjoint_event_get(kctx->kbdev); + + return 0; +} + +static int kbase_api_get_ddk_version(struct kbase_context *kctx, + struct kbase_ioctl_get_ddk_version *version) +{ + int ret; + int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING); + + if (version->version_buffer == 0) + return len; + + if (version->size < len) + return -EOVERFLOW; + + ret = copy_to_user(u64_to_user_ptr(version->version_buffer), + KERNEL_SIDE_DDK_VERSION_STRING, + sizeof(KERNEL_SIDE_DDK_VERSION_STRING)); + + if (ret) + return -EFAULT; + + return len; +} + +static int kbase_api_mem_jit_init(struct kbase_context *kctx, + struct kbase_ioctl_mem_jit_init *jit_init) +{ + return kbase_region_tracker_init_jit(kctx, jit_init->va_pages); +} + +static int kbase_api_mem_sync(struct kbase_context *kctx, + struct kbase_ioctl_mem_sync *sync) +{ + struct basep_syncset sset = { + .mem_handle.basep.handle = sync->handle, + .user_addr = sync->user_addr, + .size = sync->size, + .type = sync->type + }; + + return kbase_sync_now(kctx, &sset); +} + +static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx, + union kbase_ioctl_mem_find_cpu_offset *find) +{ + return kbasep_find_enclosing_cpu_mapping_offset( + kctx, + find->in.cpu_addr, + find->in.size, + &find->out.offset); +} + +static int kbase_api_get_context_id(struct kbase_context *kctx, + struct kbase_ioctl_get_context_id *info) +{ + info->id = kctx->id; + + return 0; +} + +static int kbase_api_tlstream_acquire(struct kbase_context *kctx, + struct kbase_ioctl_tlstream_acquire *acquire) +{ + return kbase_tlstream_acquire(kctx, acquire->flags); +} + +static int kbase_api_tlstream_flush(struct kbase_context *kctx) +{ + 
kbase_tlstream_flush_streams(); + + return 0; +} + +static int kbase_api_mem_commit(struct kbase_context *kctx, + struct kbase_ioctl_mem_commit *commit) +{ + return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages); +} + +static int kbase_api_mem_alias(struct kbase_context *kctx, + union kbase_ioctl_mem_alias *alias) +{ + struct base_mem_aliasing_info *ai; + u64 flags; + int err; + + if (alias->in.nents == 0 || alias->in.nents > 2048) + return -EINVAL; + + ai = vmalloc(sizeof(*ai) * alias->in.nents); + if (!ai) + return -ENOMEM; + + err = copy_from_user(ai, + u64_to_user_ptr(alias->in.aliasing_info), + sizeof(*ai) * alias->in.nents); + if (err) { + vfree(ai); + return -EFAULT; + } + + flags = alias->in.flags; + + alias->out.gpu_va = kbase_mem_alias(kctx, &flags, + alias->in.stride, alias->in.nents, + ai, &alias->out.va_pages); + + alias->out.flags = flags; + + vfree(ai); + + if (alias->out.gpu_va == 0) + return -ENOMEM; + + return 0; +} + +static int kbase_api_mem_import(struct kbase_context *kctx, + union kbase_ioctl_mem_import *import) +{ + int ret; + u64 flags = import->in.flags; + + ret = kbase_mem_import(kctx, + import->in.type, + u64_to_user_ptr(import->in.phandle), + import->in.padding, + &import->out.gpu_va, + &import->out.va_pages, + &flags); + + import->out.flags = flags; + + return ret; +} + +static int kbase_api_mem_flags_change(struct kbase_context *kctx, + struct kbase_ioctl_mem_flags_change *change) +{ + return kbase_mem_flags_change(kctx, change->gpu_va, + change->flags, change->mask); +} + +static int kbase_api_stream_create(struct kbase_context *kctx, + struct kbase_ioctl_stream_create *stream) +{ +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + int fd, ret; + + /* Name must be NULL-terminated and padded with NULLs, so check last + * character is NULL + */ + if (stream->name[sizeof(stream->name)-1] != 0) + return -EINVAL; + + ret = kbase_sync_fence_stream_create(stream->name, &fd); + + if (ret) + return ret; + return fd; +#else + return -ENOENT; +#endif +} + +static int kbase_api_fence_validate(struct kbase_context *kctx, + struct kbase_ioctl_fence_validate *validate) +{ +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + return kbase_sync_fence_validate(validate->fd); +#else + return -ENOENT; +#endif +} + +static int kbase_api_get_profiling_controls(struct kbase_context *kctx, + struct kbase_ioctl_get_profiling_controls *controls) +{ + int ret; + + if (controls->count > (FBDUMP_CONTROL_MAX - FBDUMP_CONTROL_MIN)) + return -EINVAL; + + ret = copy_to_user(u64_to_user_ptr(controls->buffer), + &kctx->kbdev->kbase_profiling_controls[ + FBDUMP_CONTROL_MIN], + controls->count * sizeof(u32)); + + if (ret) + return -EFAULT; + return 0; +} + +static int kbase_api_mem_profile_add(struct kbase_context *kctx, + struct kbase_ioctl_mem_profile_add *data) +{ + char *buf; + int err; + + if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) { + dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n"); + return -EINVAL; + } + + buf = kmalloc(data->len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + err = copy_from_user(buf, u64_to_user_ptr(data->buffer), + data->len); + if (err) { + kfree(buf); + return -EFAULT; + } + + return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len); +} + +static int kbase_api_soft_event_update(struct kbase_context *kctx, + struct kbase_ioctl_soft_event_update *update) +{ + if (update->flags != 0) + return -EINVAL; + + return kbase_soft_event_update(kctx, update->event, update->new_status); +} + +#if MALI_UNIT_TEST +static 
int kbase_api_tlstream_test(struct kbase_context *kctx,
+ struct kbase_ioctl_tlstream_test *test)
+{
+ kbase_tlstream_test(
+ test->tpw_count,
+ test->msg_delay,
+ test->msg_count,
+ test->aux_msg);
+
+ return 0;
+}
+
+static int kbase_api_tlstream_stats(struct kbase_context *kctx,
+ struct kbase_ioctl_tlstream_stats *stats)
+{
+ kbase_tlstream_stats(
+ &stats->bytes_collected,
+ &stats->bytes_generated);
+
+ return 0;
+}
+#endif /* MALI_UNIT_TEST */
+
+#define KBASE_HANDLE_IOCTL(cmd, function) \
+ case cmd: \
+ do { \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
+ return function(kctx); \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return function(kctx, &param); \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int ret, err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ ret = function(kctx, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return ret; \
+ } while (0)
+
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type) \
+ case cmd: \
+ do { \
+ type param; \
+ int ret, err; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ ret = function(kctx, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ return ret; \
+ } while (0)
+
+static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct kbase_context *kctx = filp->private_data;
+ struct kbase_device *kbdev = kctx->kbdev;
+ void __user *uarg = (void __user *)arg;
+
+ /* The UK ioctl values overflow the cmd field causing the type to be
+ * incremented
+ */
+ if (_IOC_TYPE(cmd) == LINUX_UK_BASE_MAGIC+2)
+ return kbase_legacy_ioctl(filp, cmd, arg);
+
+ /* The UK version check IOCTL doesn't overflow the cmd field, so is
+ * handled separately here
+ */
+ if (cmd == _IOC(_IOC_READ|_IOC_WRITE, LINUX_UK_BASE_MAGIC,
+ UKP_FUNC_ID_CHECK_VERSION,
+ sizeof(struct uku_version_check_args)))
+ return kbase_legacy_ioctl(filp, cmd, arg);
+
+ /* Only these ioctls are available until setup is complete */
+ switch (cmd) {
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
+ kbase_api_handshake,
+ struct kbase_ioctl_version_check);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
+ kbase_api_set_flags,
+ struct kbase_ioctl_set_flags);
+ }
+
+ /* Block call until version handshake and setup is complete */
+ if (kctx->api_version == 0 || !atomic_read(&kctx->setup_complete))
+ return -EINVAL;
+
+ /* Normal ioctls */
+ switch (cmd) {
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
+ kbase_api_job_submit,
+ struct kbase_ioctl_job_submit);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
+ kbase_api_get_gpuprops,
+ struct kbase_ioctl_get_gpuprops);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
+ kbase_api_post_term);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
+ kbase_api_mem_alloc,
+ union kbase_ioctl_mem_alloc);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
+ kbase_api_mem_query,
+ union kbase_ioctl_mem_query);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
+ kbase_api_mem_free,
+ struct kbase_ioctl_mem_free);
+ 
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP, + kbase_api_hwcnt_reader_setup, + struct kbase_ioctl_hwcnt_reader_setup); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE, + kbase_api_hwcnt_enable, + struct kbase_ioctl_hwcnt_enable); + KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP, + kbase_api_hwcnt_dump); + KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR, + kbase_api_hwcnt_clear); + KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY, + kbase_api_disjoint_query, + struct kbase_ioctl_disjoint_query); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION, + kbase_api_get_ddk_version, + struct kbase_ioctl_get_ddk_version); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT, + kbase_api_mem_jit_init, + struct kbase_ioctl_mem_jit_init); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC, + kbase_api_mem_sync, + struct kbase_ioctl_mem_sync); + KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET, + kbase_api_mem_find_cpu_offset, + union kbase_ioctl_mem_find_cpu_offset); + KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID, + kbase_api_get_context_id, + struct kbase_ioctl_get_context_id); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE, + kbase_api_tlstream_acquire, + struct kbase_ioctl_tlstream_acquire); + KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH, + kbase_api_tlstream_flush); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT, + kbase_api_mem_commit, + struct kbase_ioctl_mem_commit); + KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS, + kbase_api_mem_alias, + union kbase_ioctl_mem_alias); + KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT, + kbase_api_mem_import, + union kbase_ioctl_mem_import); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE, + kbase_api_mem_flags_change, + struct kbase_ioctl_mem_flags_change); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE, + kbase_api_stream_create, + struct kbase_ioctl_stream_create); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE, + kbase_api_fence_validate, + struct kbase_ioctl_fence_validate); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_PROFILING_CONTROLS, + kbase_api_get_profiling_controls, + struct kbase_ioctl_get_profiling_controls); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD, + kbase_api_mem_profile_add, + struct kbase_ioctl_mem_profile_add); + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE, + kbase_api_soft_event_update, + struct kbase_ioctl_soft_event_update); + +#if MALI_UNIT_TEST + KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST, + kbase_api_tlstream_test, + struct kbase_ioctl_tlstream_test); + KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS, + kbase_api_tlstream_stats, + struct kbase_ioctl_tlstream_stats); +#endif + } + + dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd)); + + return -ENOIOCTLCMD; +} + +static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) +{ + struct kbase_context *kctx = filp->private_data; + struct base_jd_event_v2 uevent; + int out_count = 0; + + if (count < sizeof(uevent)) + return -ENOBUFS; + + do { + while (kbase_event_dequeue(kctx, &uevent)) { + if (out_count > 0) + goto out; + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(kctx->event_queue, + kbase_event_pending(kctx)) != 0) + return -ERESTARTSYS; + } + if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) { + if (out_count == 0) + return -EPIPE; + goto out; + } + + if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0) + return -EFAULT; + + buf += sizeof(uevent); + out_count++; + count -= sizeof(uevent); + } while (count >= sizeof(uevent)); + + out: + return 
out_count * sizeof(uevent); +} + +static unsigned int kbase_poll(struct file *filp, poll_table *wait) +{ + struct kbase_context *kctx = filp->private_data; + + poll_wait(filp, &kctx->event_queue, wait); + if (kbase_event_pending(kctx)) + return POLLIN | POLLRDNORM; + + return 0; +} + +void kbase_event_wakeup(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx); + + wake_up_interruptible(&kctx->event_queue); +} + +KBASE_EXPORT_TEST_API(kbase_event_wakeup); + +static int kbase_check_flags(int flags) +{ + /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always + * closes the file descriptor in a child process. + */ + if (0 == (flags & O_CLOEXEC)) + return -EINVAL; + + return 0; +} + + +/** + * align_and_check - Align the specified pointer to the provided alignment and + * check that it is still in range. + * @gap_end: Highest possible start address for allocation (end of gap in + * address space) + * @gap_start: Start address of current memory area / gap in address space + * @info: vm_unmapped_area_info structure passed to caller, containing + * alignment, length and limits for the allocation + * @is_shader_code: True if the allocation is for shader code (which has + * additional alignment requirements) + * + * Return: true if gap_end is now aligned correctly and is still in range, + * false otherwise + */ +static bool align_and_check(unsigned long *gap_end, unsigned long gap_start, + struct vm_unmapped_area_info *info, bool is_shader_code) +{ + /* Compute highest gap address at the desired alignment */ + (*gap_end) -= info->length; + (*gap_end) -= (*gap_end - info->align_offset) & info->align_mask; + + if (is_shader_code) { + /* Check for 4GB boundary */ + if (0 == (*gap_end & BASE_MEM_MASK_4GB)) + (*gap_end) -= (info->align_offset ? info->align_offset : + info->length); + if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB)) + (*gap_end) -= (info->align_offset ? info->align_offset : + info->length); + + if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end + + info->length) & BASE_MEM_MASK_4GB)) + return false; + } + + + if ((*gap_end < info->low_limit) || (*gap_end < gap_start)) + return false; + + + return true; +} + +/* The following function is taken from the kernel and just + * renamed. As it's not exported to modules we must copy-paste it here. + */ + +static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info + *info, bool is_shader_code) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long length, low_limit, high_limit, gap_start, gap_end; + + /* Adjust search length to account for worst case alignment overhead */ + length = info->length + info->align_mask; + if (length < info->length) + return -ENOMEM; + + /* + * Adjust search limits by the desired length. + * See implementation comment at top of unmapped_area(). 
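+ *
+ * As a worked illustration of the align_and_check() step used below (the
+ * numbers are purely illustrative): with info->length = 0x400000, a 2 MB
+ * alignment (info->align_offset = 0x200000, info->align_mask = 0x1fffff)
+ * and an initial gap_end of 0x7f12345678, the first subtraction yields
+ * 0x7f11f45678 and the second rounds that down to the 2 MB boundary
+ * 0x7f11e00000, which is then range-checked against low_limit and gap_start.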
+ */ + gap_end = info->high_limit; + if (gap_end < length) + return -ENOMEM; + high_limit = gap_end - length; + + if (info->low_limit > high_limit) + return -ENOMEM; + low_limit = info->low_limit + length; + + /* Check highest gap, which does not precede any rbtree node */ + gap_start = mm->highest_vm_end; + if (gap_start <= high_limit) { + if (align_and_check(&gap_end, gap_start, info, is_shader_code)) + return gap_end; + } + + /* Check if rbtree root looks promising */ + if (RB_EMPTY_ROOT(&mm->mm_rb)) + return -ENOMEM; + vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); + if (vma->rb_subtree_gap < length) + return -ENOMEM; + + while (true) { + /* Visit right subtree if it looks promising */ + gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + if (gap_start <= high_limit && vma->vm_rb.rb_right) { + struct vm_area_struct *right = + rb_entry(vma->vm_rb.rb_right, + struct vm_area_struct, vm_rb); + if (right->rb_subtree_gap >= length) { + vma = right; + continue; + } + } + +check_current: + /* Check if current node has a suitable gap */ + gap_end = vma->vm_start; + if (gap_end < low_limit) + return -ENOMEM; + if (gap_start <= high_limit && gap_end - gap_start >= length) { + /* We found a suitable gap. Clip it with the original + * high_limit. */ + if (gap_end > info->high_limit) + gap_end = info->high_limit; + + if (align_and_check(&gap_end, gap_start, info, + is_shader_code)) + return gap_end; + } + + /* Visit left subtree if it looks promising */ + if (vma->vm_rb.rb_left) { + struct vm_area_struct *left = + rb_entry(vma->vm_rb.rb_left, + struct vm_area_struct, vm_rb); + if (left->rb_subtree_gap >= length) { + vma = left; + continue; + } + } + + /* Go back up the rbtree to find next candidate node */ + while (true) { + struct rb_node *prev = &vma->vm_rb; + if (!rb_parent(prev)) + return -ENOMEM; + vma = rb_entry(rb_parent(prev), + struct vm_area_struct, vm_rb); + if (prev == vma->vm_rb.rb_right) { + gap_start = vma->vm_prev ? + vma->vm_prev->vm_end : 0; + goto check_current; + } + } + } + + return -ENOMEM; +} + +static unsigned long kbase_get_unmapped_area(struct file *filp, + const unsigned long addr, const unsigned long len, + const unsigned long pgoff, const unsigned long flags) +{ + /* based on get_unmapped_area, but simplified slightly due to that some + * values are known in advance */ + struct kbase_context *kctx = filp->private_data; + struct mm_struct *mm = current->mm; + struct vm_unmapped_area_info info; + unsigned long align_offset = 0; + unsigned long align_mask = 0; + unsigned long high_limit = mm->mmap_base; + unsigned long low_limit = PAGE_SIZE; + int cpu_va_bits = BITS_PER_LONG; + int gpu_pc_bits = + kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size; + bool is_shader_code = false; + unsigned long ret; + + /* err on fixed address */ + if ((flags & MAP_FIXED) || addr) + return -EINVAL; + +#ifdef CONFIG_64BIT + /* too big? 
*/ + if (len > TASK_SIZE - SZ_2M) + return -ENOMEM; + + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + + if (kbase_hw_has_feature(kctx->kbdev, + BASE_HW_FEATURE_33BIT_VA)) { + high_limit = kctx->same_va_end << PAGE_SHIFT; + } else { + high_limit = min_t(unsigned long, mm->mmap_base, + (kctx->same_va_end << PAGE_SHIFT)); + if (len >= SZ_2M) { + align_offset = SZ_2M; + align_mask = SZ_2M - 1; + } + } + + low_limit = SZ_2M; + } else { + cpu_va_bits = 32; + } +#endif /* CONFIG_64BIT */ + if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) && + (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) { + int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE); + + if (!kctx->pending_regions[cookie]) + return -EINVAL; + + if (!(kctx->pending_regions[cookie]->flags & + KBASE_REG_GPU_NX)) { + if (cpu_va_bits > gpu_pc_bits) { + align_offset = 1ULL << gpu_pc_bits; + align_mask = align_offset - 1; + is_shader_code = true; + } + } +#ifndef CONFIG_64BIT + } else { + return current->mm->get_unmapped_area(filp, addr, len, pgoff, + flags); +#endif + } + + info.flags = 0; + info.length = len; + info.low_limit = low_limit; + info.high_limit = high_limit; + info.align_offset = align_offset; + info.align_mask = align_mask; + + ret = kbase_unmapped_area_topdown(&info, is_shader_code); + + if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base && + high_limit < (kctx->same_va_end << PAGE_SHIFT)) { + /* Retry above mmap_base */ + info.low_limit = mm->mmap_base; + info.high_limit = min_t(u64, TASK_SIZE, + (kctx->same_va_end << PAGE_SHIFT)); + + ret = kbase_unmapped_area_topdown(&info, is_shader_code); + } + + return ret; +} + +static const struct file_operations kbase_fops = { + .owner = THIS_MODULE, + .open = kbase_open, + .release = kbase_release, + .read = kbase_read, + .poll = kbase_poll, + .unlocked_ioctl = kbase_ioctl, + .compat_ioctl = kbase_ioctl, + .mmap = kbase_mmap, + .check_flags = kbase_check_flags, + .get_unmapped_area = kbase_get_unmapped_area, +}; + +/** + * show_policy - Show callback for the power_policy sysfs file. + * + * This function is called to get the contents of the power_policy sysfs + * file. This is a list of the available policies with the currently active one + * surrounded by square brackets. + * + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf) +{ + struct kbase_device *kbdev; + const struct kbase_pm_policy *current_policy; + const struct kbase_pm_policy *const *policy_list; + int policy_count; + int i; + ssize_t ret = 0; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + current_policy = kbase_pm_get_policy(kbdev); + + policy_count = kbase_pm_list_policies(&policy_list); + + for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) { + if (policy_list[i] == current_policy) + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name); + else + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name); + } + + if (ret < PAGE_SIZE - 1) { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + } else { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/** + * set_policy - Store callback for the power_policy sysfs file. + * + * This function is called when the power_policy sysfs file is written to. 
+ * It matches the requested policy against the available policies and if a + * matching policy is found calls kbase_pm_set_policy() to change the + * policy. + * + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + const struct kbase_pm_policy *new_policy = NULL; + const struct kbase_pm_policy *const *policy_list; + int policy_count; + int i; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + policy_count = kbase_pm_list_policies(&policy_list); + + for (i = 0; i < policy_count; i++) { + if (sysfs_streq(policy_list[i]->name, buf)) { + new_policy = policy_list[i]; + break; + } + } + + if (!new_policy) { + dev_err(dev, "power_policy: policy not found\n"); + return -EINVAL; + } + + kbase_pm_set_policy(kbdev, new_policy); + + return count; +} + +/* + * The sysfs file power_policy. + * + * This is used for obtaining information about the available policies, + * determining which policy is currently active, and changing the active + * policy. + */ +static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy); + +/** + * show_ca_policy - Show callback for the core_availability_policy sysfs file. + * + * This function is called to get the contents of the core_availability_policy + * sysfs file. This is a list of the available policies with the currently + * active one surrounded by square brackets. + * + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + const struct kbase_pm_ca_policy *current_policy; + const struct kbase_pm_ca_policy *const *policy_list; + int policy_count; + int i; + ssize_t ret = 0; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + current_policy = kbase_pm_ca_get_policy(kbdev); + + policy_count = kbase_pm_ca_list_policies(&policy_list); + + for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) { + if (policy_list[i] == current_policy) + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name); + else + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name); + } + + if (ret < PAGE_SIZE - 1) { + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + } else { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/** + * set_ca_policy - Store callback for the core_availability_policy sysfs file. + * + * This function is called when the core_availability_policy sysfs file is + * written to. It matches the requested policy against the available policies + * and if a matching policy is found calls kbase_pm_set_policy() to change + * the policy. + * + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * Return: @count if the function succeeded. An error code on failure. 
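+ *
+ * A minimal user-space sketch of driving this file is shown below. It is
+ * illustrative only and not part of this patch: the sysfs path and the
+ * "demand" policy name are assumptions that depend on how the device and
+ * its core availability policies are integrated on a given platform.
+ *
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           FILE *f = fopen("/sys/class/misc/mali0/device/core_availability_policy", "w");
+ *
+ *           if (!f)
+ *                   return 1;
+ *           fputs("demand", f);
+ *           return fclose(f) ? 1 : 0;
+ *   }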
+ */ +static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + const struct kbase_pm_ca_policy *new_policy = NULL; + const struct kbase_pm_ca_policy *const *policy_list; + int policy_count; + int i; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + policy_count = kbase_pm_ca_list_policies(&policy_list); + + for (i = 0; i < policy_count; i++) { + if (sysfs_streq(policy_list[i]->name, buf)) { + new_policy = policy_list[i]; + break; + } + } + + if (!new_policy) { + dev_err(dev, "core_availability_policy: policy not found\n"); + return -EINVAL; + } + + kbase_pm_ca_set_policy(kbdev, new_policy); + + return count; +} + +/* + * The sysfs file core_availability_policy + * + * This is used for obtaining information about the available policies, + * determining which policy is currently active, and changing the active + * policy. + */ +static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy); + +/* + * show_core_mask - Show callback for the core_mask sysfs file. + * + * This function is called to get the contents of the core_mask sysfs file. + * + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret = 0; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "Current core mask (JS0) : 0x%llX\n", + kbdev->pm.debug_core_mask[0]); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "Current core mask (JS1) : 0x%llX\n", + kbdev->pm.debug_core_mask[1]); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "Current core mask (JS2) : 0x%llX\n", + kbdev->pm.debug_core_mask[2]); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "Available core mask : 0x%llX\n", + kbdev->gpu_props.props.raw_props.shader_present); + + return ret; +} + +/** + * set_core_mask - Store callback for the core_mask sysfs file. + * + * This function is called when the core_mask sysfs file is written to. + * + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + u64 new_core_mask[3]; + int items; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + items = sscanf(buf, "%llx %llx %llx", + &new_core_mask[0], &new_core_mask[1], + &new_core_mask[2]); + + if (items == 1) + new_core_mask[1] = new_core_mask[2] = new_core_mask[0]; + + if (items == 1 || items == 3) { + u64 shader_present = + kbdev->gpu_props.props.raw_props.shader_present; + u64 group0_core_mask = + kbdev->gpu_props.props.coherency_info.group[0]. 
+ core_mask; + + if ((new_core_mask[0] & shader_present) != new_core_mask[0] || + !(new_core_mask[0] & group0_core_mask) || + (new_core_mask[1] & shader_present) != + new_core_mask[1] || + !(new_core_mask[1] & group0_core_mask) || + (new_core_mask[2] & shader_present) != + new_core_mask[2] || + !(new_core_mask[2] & group0_core_mask)) { + dev_err(dev, "power_policy: invalid core specification\n"); + return -EINVAL; + } + + if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] || + kbdev->pm.debug_core_mask[1] != + new_core_mask[1] || + kbdev->pm.debug_core_mask[2] != + new_core_mask[2]) { + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0], + new_core_mask[1], new_core_mask[2]); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + } + + return count; + } + + dev_err(kbdev->dev, "Couldn't process set_core_mask write operation.\n" + "Use format \n" + "or \n"); + return -EINVAL; +} + +/* + * The sysfs file core_mask. + * + * This is used to restrict shader core availability for debugging purposes. + * Reading it will show the current core mask and the mask of cores available. + * Writing to it will set the current core mask. + */ +static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask); + +/** + * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs + * file. + * + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The value written to the sysfs file. + * @count: The number of bytes written to the sysfs file. + * + * This allows setting the timeout for software jobs. Waiting soft event wait + * jobs will be cancelled after this period expires, while soft fence wait jobs + * will print debug information if the fence debug feature is enabled. + * + * This is expressed in milliseconds. + * + * Return: count if the function succeeded. An error code on failure. + */ +static ssize_t set_soft_job_timeout(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int soft_job_timeout_ms; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) || + (soft_job_timeout_ms <= 0)) + return -EINVAL; + + atomic_set(&kbdev->js_data.soft_job_timeout_ms, + soft_job_timeout_ms); + + return count; +} + +/** + * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs + * file. + * + * This will return the timeout for the software jobs. + * + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer for the sysfs file contents. + * + * Return: The number of bytes output to buf. 
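+ *
+ * For example, a three second timeout reads back as "3000", and writing
+ * "5000" via set_soft_job_timeout() above would extend it to five seconds.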
+ */
+static ssize_t show_soft_job_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char * const buf)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n",
+ atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+}
+
+static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
+ show_soft_job_timeout, set_soft_job_timeout);
+
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
+ int default_ticks, u32 old_ticks)
+{
+ if (timeout_ms > 0) {
+ u64 ticks = timeout_ms * 1000000ULL;
+ do_div(ticks, kbdev->js_data.scheduling_period_ns);
+ if (!ticks)
+ return 1;
+ return ticks;
+ } else if (timeout_ms < 0) {
+ return default_ticks;
+ } else {
+ return old_ticks;
+ }
+}
+
+/**
+ * set_js_timeouts - Store callback for the js_timeouts sysfs file.
+ *
+ * This function is called when the js_timeouts sysfs file is written to. The
+ * file contains eight values separated by whitespace. The values are
+ * basically the same as %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL,
+ * %JS_HARD_STOP_TICKS_SS, %JS_HARD_STOP_TICKS_CL,
+ * %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS, %JS_RESET_TICKS_CL,
+ * %JS_RESET_TICKS_DUMPING configuration values (in that order), with the
+ * difference that the js_timeouts values are expressed in MILLISECONDS.
+ *
+ * The js_timeouts sysfs file allows the values currently in use by the job
+ * scheduler to be overridden. Note that a value needs to be other than 0
+ * for it to override the current job scheduler value.
+ *
+ * @dev: The device this sysfs file is for
+ * @attr: The attributes of the sysfs file
+ * @buf: The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int items;
+ long js_soft_stop_ms;
+ long js_soft_stop_ms_cl;
+ long js_hard_stop_ms_ss;
+ long js_hard_stop_ms_cl;
+ long js_hard_stop_ms_dumping;
+ long js_reset_ms_ss;
+ long js_reset_ms_cl;
+ long js_reset_ms_dumping;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
+ &js_soft_stop_ms, &js_soft_stop_ms_cl,
+ &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+ &js_hard_stop_ms_dumping, &js_reset_ms_ss,
+ &js_reset_ms_cl, &js_reset_ms_dumping);
+
+ if (items == 8) {
+ struct kbasep_js_device_data *js_data = &kbdev->js_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
+ js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
+ default, js_data->ticks_name); \
+ dev_dbg(kbdev->dev, "Overriding " #ticks_name \
+ " with %lu ticks (%lu ms)\n", \
+ (unsigned long)js_data->ticks_name, \
+ ms_name); \
+ } while (0)
+
+ UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
+ DEFAULT_JS_SOFT_STOP_TICKS);
+ UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
+ DEFAULT_JS_SOFT_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+ DEFAULT_JS_HARD_STOP_TICKS_SS_8408 : + DEFAULT_JS_HARD_STOP_TICKS_SS); + UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl, + DEFAULT_JS_HARD_STOP_TICKS_CL); + UPDATE_TIMEOUT(hard_stop_ticks_dumping, + js_hard_stop_ms_dumping, + DEFAULT_JS_HARD_STOP_TICKS_DUMPING); + UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss, + kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ? + DEFAULT_JS_RESET_TICKS_SS_8408 : + DEFAULT_JS_RESET_TICKS_SS); + UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl, + DEFAULT_JS_RESET_TICKS_CL); + UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping, + DEFAULT_JS_RESET_TICKS_DUMPING); + + kbase_js_set_timeouts(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return count; + } + + dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n" + "Use format \n" + "Write 0 for no change, -1 to restore default timeout\n"); + return -EINVAL; +} + +static unsigned long get_js_timeout_in_ms( + u32 scheduling_period_ns, + u32 ticks) +{ + u64 ms = (u64)ticks * scheduling_period_ns; + + do_div(ms, 1000000UL); + return ms; +} + +/** + * show_js_timeouts - Show callback for the js_timeouts sysfs file. + * + * This function is called to get the contents of the js_timeouts sysfs + * file. It returns the last set values written to the js_timeouts sysfs file. + * If the file didn't get written yet, the values will be current setting in + * use. + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + unsigned long js_soft_stop_ms; + unsigned long js_soft_stop_ms_cl; + unsigned long js_hard_stop_ms_ss; + unsigned long js_hard_stop_ms_cl; + unsigned long js_hard_stop_ms_dumping; + unsigned long js_reset_ms_ss; + unsigned long js_reset_ms_cl; + unsigned long js_reset_ms_dumping; + u32 scheduling_period_ns; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + scheduling_period_ns = kbdev->js_data.scheduling_period_ns; + +#define GET_TIMEOUT(name) get_js_timeout_in_ms(\ + scheduling_period_ns, \ + kbdev->js_data.name) + + js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks); + js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl); + js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss); + js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl); + js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping); + js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss); + js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl); + js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping); + +#undef GET_TIMEOUT + + ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n", + js_soft_stop_ms, js_soft_stop_ms_cl, + js_hard_stop_ms_ss, js_hard_stop_ms_cl, + js_hard_stop_ms_dumping, js_reset_ms_ss, + js_reset_ms_cl, js_reset_ms_dumping); + + if (ret >= PAGE_SIZE) { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/* + * The sysfs file js_timeouts. + * + * This is used to override the current job scheduler values for + * JS_STOP_STOP_TICKS_SS + * JS_STOP_STOP_TICKS_CL + * JS_HARD_STOP_TICKS_SS + * JS_HARD_STOP_TICKS_CL + * JS_HARD_STOP_TICKS_DUMPING + * JS_RESET_TICKS_SS + * JS_RESET_TICKS_CL + * JS_RESET_TICKS_DUMPING. 
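+ *
+ * The file takes eight whitespace-separated millisecond values in the order
+ * listed above; writing 0 leaves a field unchanged and -1 restores its
+ * default. As an illustrative example, assuming a 100 ms scheduling period,
+ * writing "500 500 1500 1500 2000 3000 3000 4000" would request a soft stop
+ * after 5 scheduling ticks (500 ms / 100 ms), with the remaining fields
+ * converted the same way by timeout_ms_to_ticks().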
+ */ +static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts); + +static u32 get_new_js_timeout( + u32 old_period, + u32 old_ticks, + u32 new_scheduling_period_ns) +{ + u64 ticks = (u64)old_period * (u64)old_ticks; + do_div(ticks, new_scheduling_period_ns); + return ticks?ticks:1; +} + +/** + * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs + * file + * @dev: The device the sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * This function is called when the js_scheduling_period sysfs file is written + * to. It checks the data written, and if valid updates the js_scheduling_period + * value + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_js_scheduling_period(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int ret; + unsigned int js_scheduling_period; + u32 new_scheduling_period_ns; + u32 old_period; + struct kbasep_js_device_data *js_data; + unsigned long flags; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + js_data = &kbdev->js_data; + + ret = kstrtouint(buf, 0, &js_scheduling_period); + if (ret || !js_scheduling_period) { + dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n" + "Use format \n"); + return -EINVAL; + } + + new_scheduling_period_ns = js_scheduling_period * 1000000; + + /* Update scheduling timeouts */ + mutex_lock(&js_data->runpool_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* If no contexts have been scheduled since js_timeouts was last written + * to, the new timeouts might not have been latched yet. So check if an + * update is pending and use the new values if necessary. */ + + /* Use previous 'new' scheduling period as a base if present. */ + old_period = js_data->scheduling_period_ns; + +#define SET_TIMEOUT(name) \ + (js_data->name = get_new_js_timeout(\ + old_period, \ + kbdev->js_data.name, \ + new_scheduling_period_ns)) + + SET_TIMEOUT(soft_stop_ticks); + SET_TIMEOUT(soft_stop_ticks_cl); + SET_TIMEOUT(hard_stop_ticks_ss); + SET_TIMEOUT(hard_stop_ticks_cl); + SET_TIMEOUT(hard_stop_ticks_dumping); + SET_TIMEOUT(gpu_reset_ticks_ss); + SET_TIMEOUT(gpu_reset_ticks_cl); + SET_TIMEOUT(gpu_reset_ticks_dumping); + +#undef SET_TIMEOUT + + js_data->scheduling_period_ns = new_scheduling_period_ns; + + kbase_js_set_timeouts(kbdev); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&js_data->runpool_mutex); + + dev_dbg(kbdev->dev, "JS scheduling period: %dms\n", + js_scheduling_period); + + return count; +} + +/** + * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs + * entry. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the GPU information. + * + * This function is called to get the current period used for the JS scheduling + * period. + * + * Return: The number of bytes output to @buf. 
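+ *
+ * Note that changing the period via set_js_scheduling_period() above rescales
+ * the tick counts so the absolute timeouts are preserved: for example, a
+ * soft_stop_ticks of 5 with a 100 ms period (a 500 ms timeout) becomes 10
+ * ticks if the period is reduced to 50 ms, as computed by
+ * get_new_js_timeout(). The figures here are purely illustrative.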
+ */ +static ssize_t show_js_scheduling_period(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + u32 period; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + period = kbdev->js_data.scheduling_period_ns; + + ret = scnprintf(buf, PAGE_SIZE, "%d\n", + period / 1000000); + + return ret; +} + +static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR, + show_js_scheduling_period, set_js_scheduling_period); + +#if !MALI_CUSTOMER_RELEASE +/** + * set_force_replay - Store callback for the force_replay sysfs file. + * + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + if (!strncmp("limit=", buf, MIN(6, count))) { + int force_replay_limit; + int items = sscanf(buf, "limit=%u", &force_replay_limit); + + if (items == 1) { + kbdev->force_replay_random = false; + kbdev->force_replay_limit = force_replay_limit; + kbdev->force_replay_count = 0; + + return count; + } + } else if (!strncmp("random_limit", buf, MIN(12, count))) { + kbdev->force_replay_random = true; + kbdev->force_replay_count = 0; + + return count; + } else if (!strncmp("norandom_limit", buf, MIN(14, count))) { + kbdev->force_replay_random = false; + kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED; + kbdev->force_replay_count = 0; + + return count; + } else if (!strncmp("core_req=", buf, MIN(9, count))) { + unsigned int core_req; + int items = sscanf(buf, "core_req=%x", &core_req); + + if (items == 1) { + kbdev->force_replay_core_req = (base_jd_core_req)core_req; + + return count; + } + } + dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=, random_limit, norandom_limit, core_req=\n"); + return -EINVAL; +} + +/** + * show_force_replay - Show callback for the force_replay sysfs file. + * + * This function is called to get the contents of the force_replay sysfs + * file. It returns the last set value written to the force_replay sysfs file. + * If the file didn't get written yet, the values will be 0. + * + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_force_replay(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + if (kbdev->force_replay_random) + ret = scnprintf(buf, PAGE_SIZE, + "limit=0\nrandom_limit\ncore_req=%x\n", + kbdev->force_replay_core_req); + else + ret = scnprintf(buf, PAGE_SIZE, + "limit=%u\nnorandom_limit\ncore_req=%x\n", + kbdev->force_replay_limit, + kbdev->force_replay_core_req); + + if (ret >= PAGE_SIZE) { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/* + * The sysfs file force_replay. 
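+ *
+ * Illustrative writes accepted by set_force_replay() above: "limit=3",
+ * "random_limit", "norandom_limit" and "core_req=" followed by a hex value;
+ * reading the file reports the currently active setting.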
+ */ +static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay, + set_force_replay); +#endif /* !MALI_CUSTOMER_RELEASE */ + +#ifdef CONFIG_MALI_DEBUG +static ssize_t set_js_softstop_always(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int ret; + int softstop_always; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = kstrtoint(buf, 0, &softstop_always); + if (ret || ((softstop_always != 0) && (softstop_always != 1))) { + dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n" + "Use format \n"); + return -EINVAL; + } + + kbdev->js_data.softstop_always = (bool) softstop_always; + dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n", + (kbdev->js_data.softstop_always) ? + "Enabled" : "Disabled"); + return count; +} + +static ssize_t show_js_softstop_always(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always); + + if (ret >= PAGE_SIZE) { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/* + * By default, soft-stops are disabled when only a single context is present. + * The ability to enable soft-stop when only a single context is present can be + * used for debug and unit-testing purposes. + * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.) + */ +static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always); +#endif /* CONFIG_MALI_DEBUG */ + +#ifdef CONFIG_MALI_DEBUG +typedef void (kbasep_debug_command_func) (struct kbase_device *); + +enum kbasep_debug_command_code { + KBASEP_DEBUG_COMMAND_DUMPTRACE, + + /* This must be the last enum */ + KBASEP_DEBUG_COMMAND_COUNT +}; + +struct kbasep_debug_command { + char *str; + kbasep_debug_command_func *func; +}; + +/* Debug commands supported by the driver */ +static const struct kbasep_debug_command debug_commands[] = { + { + .str = "dumptrace", + .func = &kbasep_trace_dump, + } +}; + +/** + * show_debug - Show callback for the debug_command sysfs file. + * + * This function is called to get the contents of the debug_command sysfs + * file. This is a list of the available debug commands, separated by newlines. + * + * @dev: The device this sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The output buffer for the sysfs file contents + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + int i; + ssize_t ret = 0; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++) + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str); + + if (ret >= PAGE_SIZE) { + buf[PAGE_SIZE - 2] = '\n'; + buf[PAGE_SIZE - 1] = '\0'; + ret = PAGE_SIZE - 1; + } + + return ret; +} + +/** + * issue_debug - Store callback for the debug_command sysfs file. + * + * This function is called when the debug_command sysfs file is written to. + * It matches the requested command against the available commands, and if + * a matching command is found calls the associated function from + * @debug_commands to issue the command. 
+ * + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int i; + + kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) { + if (sysfs_streq(debug_commands[i].str, buf)) { + debug_commands[i].func(kbdev); + return count; + } + } + + /* Debug Command not found */ + dev_err(dev, "debug_command: command not known\n"); + return -EINVAL; +} + +/* The sysfs file debug_command. + * + * This is used to issue general debug commands to the device driver. + * Reading it will produce a list of debug commands, separated by newlines. + * Writing to it with one of those commands will issue said command. + */ +static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug); +#endif /* CONFIG_MALI_DEBUG */ + +/** + * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the GPU information. + * + * This function is called to get a description of the present Mali + * GPU via the gpuinfo sysfs entry. This includes the GPU family, the + * number of cores, the hardware version and the raw product id. For + * example + * + * Mali-T60x MP4 r0p0 0x6956 + * + * Return: The number of bytes output to @buf. + */ +static ssize_t kbase_show_gpuinfo(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const struct gpu_product_id_name { + unsigned id; + char *name; + } gpu_product_id_names[] = { + { .id = GPU_ID_PI_T60X, .name = "Mali-T60x" }, + { .id = GPU_ID_PI_T62X, .name = "Mali-T62x" }, + { .id = GPU_ID_PI_T72X, .name = "Mali-T72x" }, + { .id = GPU_ID_PI_T76X, .name = "Mali-T76x" }, + { .id = GPU_ID_PI_T82X, .name = "Mali-T82x" }, + { .id = GPU_ID_PI_T83X, .name = "Mali-T83x" }, + { .id = GPU_ID_PI_T86X, .name = "Mali-T86x" }, + { .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" }, + { .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, + .name = "Mali-G71" }, + { .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, + .name = "Mali-G72" }, + { .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, + .name = "Mali-G51" }, + { .id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT, + .name = "Mali-TNOx" }, + }; + const char *product_name = "(Unknown Mali GPU)"; + struct kbase_device *kbdev; + u32 gpu_id; + unsigned product_id, product_id_mask; + unsigned i; + bool is_new_format; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; + is_new_format = GPU_ID_IS_NEW_FORMAT(product_id); + product_id_mask = + (is_new_format ? 
+ GPU_ID2_PRODUCT_MODEL : + GPU_ID_VERSION_PRODUCT_ID) >> + GPU_ID_VERSION_PRODUCT_ID_SHIFT; + + for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) { + const struct gpu_product_id_name *p = &gpu_product_id_names[i]; + + if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) && + (p->id & product_id_mask) == + (product_id & product_id_mask)) { + product_name = p->name; + break; + } + } + + return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n", + product_name, kbdev->gpu_props.num_cores, + (gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT, + (gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT, + product_id); +} +static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL); + +/** + * set_dvfs_period - Store callback for the dvfs_period sysfs file. + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * This function is called when the dvfs_period sysfs file is written to. It + * checks the data written, and if valid updates the DVFS period variable, + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_dvfs_period(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int ret; + int dvfs_period; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = kstrtoint(buf, 0, &dvfs_period); + if (ret || dvfs_period <= 0) { + dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n" + "Use format \n"); + return -EINVAL; + } + + kbdev->pm.dvfs_period = dvfs_period; + dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period); + + return count; +} + +/** + * show_dvfs_period - Show callback for the dvfs_period sysfs entry. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the GPU information. + * + * This function is called to get the current period used for the DVFS sample + * timer. + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_dvfs_period(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period); + + return ret; +} + +static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period, + set_dvfs_period); + +/** + * set_pm_poweroff - Store callback for the pm_poweroff sysfs file. + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * This function is called when the pm_poweroff sysfs file is written to. + * + * This file contains three values separated by whitespace. The values + * are gpu_poweroff_time (the period of the poweroff timer, in ns), + * poweroff_shader_ticks (the number of poweroff timer ticks before an idle + * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer + * ticks before the GPU is powered off), in that order. + * + * Return: @count if the function succeeded. An error code on failure. 
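+ *
+ * As an illustrative example only (the figures are made up and carry no
+ * recommendation), writing "500000 2 8" selects a 500 us poweroff timer,
+ * powers off idle shader cores after 2 ticks (1 ms) and the GPU after
+ * 8 ticks (4 ms).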
+ */ +static ssize_t set_pm_poweroff(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int items; + s64 gpu_poweroff_time; + int poweroff_shader_ticks, poweroff_gpu_ticks; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time, + &poweroff_shader_ticks, + &poweroff_gpu_ticks); + if (items != 3) { + dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n" + "Use format \n"); + return -EINVAL; + } + + kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(gpu_poweroff_time); + kbdev->pm.poweroff_shader_ticks = poweroff_shader_ticks; + kbdev->pm.poweroff_gpu_ticks = poweroff_gpu_ticks; + + return count; +} + +/** + * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the GPU information. + * + * This function is called to get the current period used for the DVFS sample + * timer. + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_pm_poweroff(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%llu %u %u\n", + ktime_to_ns(kbdev->pm.gpu_poweroff_time), + kbdev->pm.poweroff_shader_ticks, + kbdev->pm.poweroff_gpu_ticks); + + return ret; +} + +static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff, + set_pm_poweroff); + +/** + * set_reset_timeout - Store callback for the reset_timeout sysfs file. + * @dev: The device with sysfs file is for + * @attr: The attributes of the sysfs file + * @buf: The value written to the sysfs file + * @count: The number of bytes written to the sysfs file + * + * This function is called when the reset_timeout sysfs file is written to. It + * checks the data written, and if valid updates the reset timeout. + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_reset_timeout(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + int ret; + int reset_timeout; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = kstrtoint(buf, 0, &reset_timeout); + if (ret || reset_timeout <= 0) { + dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n" + "Use format \n"); + return -EINVAL; + } + + kbdev->reset_timeout_ms = reset_timeout; + dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout); + + return count; +} + +/** + * show_reset_timeout - Show callback for the reset_timeout sysfs entry. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the GPU information. + * + * This function is called to get the current reset timeout. + * + * Return: The number of bytes output to @buf. 
+ */ +static ssize_t show_reset_timeout(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms); + + return ret; +} + +static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout, + set_reset_timeout); + + + +static ssize_t show_mem_pool_size(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%zu\n", + kbase_mem_pool_size(&kbdev->mem_pool)); + + return ret; +} + +static ssize_t set_mem_pool_size(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + size_t new_size; + int err; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + err = kstrtoul(buf, 0, (unsigned long *)&new_size); + if (err) + return err; + + kbase_mem_pool_trim(&kbdev->mem_pool, new_size); + + return count; +} + +static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size, + set_mem_pool_size); + +static ssize_t show_mem_pool_max_size(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + ssize_t ret; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + ret = scnprintf(buf, PAGE_SIZE, "%zu\n", + kbase_mem_pool_max_size(&kbdev->mem_pool)); + + return ret; +} + +static ssize_t set_mem_pool_max_size(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + size_t new_max_size; + int err; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + err = kstrtoul(buf, 0, (unsigned long *)&new_max_size); + if (err) + return -EINVAL; + + kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size); + + return count; +} + +static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size, + set_mem_pool_max_size); + +/** + * show_lp_mem_pool_size - Show size of the large memory pages pool. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the pool size. + * + * This function is called to get the number of large memory pages which currently populate the kbdev pool. + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_lp_mem_pool_size(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + return scnprintf(buf, PAGE_SIZE, "%zu\n", kbase_mem_pool_size(&kbdev->lp_mem_pool)); +} + +/** + * set_lp_mem_pool_size - Set size of the large memory pages pool. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The value written to the sysfs file. + * @count: The number of bytes written to the sysfs file. + * + * This function is called to set the number of large memory pages which should populate the kbdev pool. + * This may cause existing pages to be removed from the pool, or new pages to be created and then added to the pool. + * + * Return: @count if the function succeeded. An error code on failure. 
+ */ +static ssize_t set_lp_mem_pool_size(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + unsigned long new_size; + int err; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + err = kstrtoul(buf, 0, &new_size); + if (err) + return err; + + kbase_mem_pool_trim(&kbdev->lp_mem_pool, new_size); + + return count; +} + +static DEVICE_ATTR(lp_mem_pool_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_size, + set_lp_mem_pool_size); + +/** + * show_lp_mem_pool_max_size - Show maximum size of the large memory pages pool. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The output buffer to receive the pool size. + * + * This function is called to get the maximum number of large memory pages that the kbdev pool can possibly contain. + * + * Return: The number of bytes output to @buf. + */ +static ssize_t show_lp_mem_pool_max_size(struct device *dev, + struct device_attribute *attr, char * const buf) +{ + struct kbase_device *kbdev; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + return scnprintf(buf, PAGE_SIZE, "%zu\n", kbase_mem_pool_max_size(&kbdev->lp_mem_pool)); +} + +/** + * set_lp_mem_pool_max_size - Set maximum size of the large memory pages pool. + * @dev: The device this sysfs file is for. + * @attr: The attributes of the sysfs file. + * @buf: The value written to the sysfs file. + * @count: The number of bytes written to the sysfs file. + * + * This function is called to set the maximum number of large memory pages that the kbdev pool can possibly contain. + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t set_lp_mem_pool_max_size(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kbase_device *kbdev; + unsigned long new_max_size; + int err; + + kbdev = to_kbase_device(dev); + if (!kbdev) + return -ENODEV; + + err = kstrtoul(buf, 0, &new_max_size); + if (err) + return -EINVAL; + + kbase_mem_pool_set_max_size(&kbdev->lp_mem_pool, new_max_size); + + return count; +} + +static DEVICE_ATTR(lp_mem_pool_max_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_max_size, + set_lp_mem_pool_max_size); + +#ifdef CONFIG_DEBUG_FS + +/* Number of entries in serialize_jobs_settings[] */ +#define NR_SERIALIZE_JOBS_SETTINGS 5 +/* Maximum string length in serialize_jobs_settings[].name */ +#define MAX_SERIALIZE_JOBS_NAME_LEN 16 + +static struct +{ + char *name; + u8 setting; +} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = { + {"none", 0}, + {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT}, + {"inter-slot", KBASE_SERIALIZE_INTER_SLOT}, + {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT}, + {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT | + KBASE_SERIALIZE_RESET} +}; + +/** + * kbasep_serialize_jobs_seq_show - Show callback for the serialize_jobs debugfs + * file + * @sfile: seq_file pointer + * @data: Private callback data + * + * This function is called to get the contents of the serialize_jobs debugfs + * file. This is a list of the available settings with the currently active one + * surrounded by square brackets. 
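+ * For example, when the "none" setting is active the file reads:
+ *
+ *   [none] intra-slot inter-slot full full-reset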
+ * + * Return: 0 on success, or an error code on error + */ +static int kbasep_serialize_jobs_seq_show(struct seq_file *sfile, void *data) +{ + struct kbase_device *kbdev = sfile->private; + int i; + + CSTD_UNUSED(data); + + for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) { + if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting) + seq_printf(sfile, "[%s] ", + serialize_jobs_settings[i].name); + else + seq_printf(sfile, "%s ", + serialize_jobs_settings[i].name); + } + + seq_puts(sfile, "\n"); + + return 0; +} + +/** + * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs + * debugfs file. + * @file: File pointer + * @ubuf: User buffer containing data to store + * @count: Number of bytes in user buffer + * @ppos: File position + * + * This function is called when the serialize_jobs debugfs file is written to. + * It matches the requested setting against the available settings and if a + * matching setting is found updates kbdev->serialize_jobs. + * + * Return: @count if the function succeeded. An error code on failure. + */ +static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct kbase_device *kbdev = s->private; + char buf[MAX_SERIALIZE_JOBS_NAME_LEN]; + int i; + bool valid = false; + + CSTD_UNUSED(ppos); + + count = min_t(size_t, sizeof(buf) - 1, count); + if (copy_from_user(buf, ubuf, count)) + return -EFAULT; + + buf[count] = 0; + + for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) { + if (sysfs_streq(serialize_jobs_settings[i].name, buf)) { + kbdev->serialize_jobs = + serialize_jobs_settings[i].setting; + valid = true; + break; + } + } + + if (!valid) { + dev_err(kbdev->dev, "serialize_jobs: invalid setting\n"); + return -EINVAL; + } + + return count; +} + +/** + * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs + * debugfs file + * @in: inode pointer + * @file: file pointer + * + * Return: Zero on success, error code on failure + */ +static int kbasep_serialize_jobs_debugfs_open(struct inode *in, + struct file *file) +{ + return single_open(file, kbasep_serialize_jobs_seq_show, in->i_private); +} + +static const struct file_operations kbasep_serialize_jobs_debugfs_fops = { + .open = kbasep_serialize_jobs_debugfs_open, + .read = seq_read, + .write = kbasep_serialize_jobs_debugfs_write, + .llseek = seq_lseek, + .release = single_release, +}; + +#endif /* CONFIG_DEBUG_FS */ + +static int kbasep_protected_mode_init(struct kbase_device *kbdev) +{ +#ifdef CONFIG_OF + struct device_node *protected_node; + struct platform_device *pdev; + struct protected_mode_device *protected_dev; +#endif + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) { + /* Use native protected ops */ + kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev), + GFP_KERNEL); + if (!kbdev->protected_dev) + return -ENOMEM; + kbdev->protected_dev->data = kbdev; + kbdev->protected_ops = &kbase_native_protected_ops; + kbdev->protected_mode_support = true; + return 0; + } + + kbdev->protected_mode_support = false; + +#ifdef CONFIG_OF + protected_node = of_parse_phandle(kbdev->dev->of_node, + "protected-mode-switcher", 0); + + if (!protected_node) + protected_node = of_parse_phandle(kbdev->dev->of_node, + "secure-mode-switcher", 0); + + if (!protected_node) { + /* If protected_node cannot be looked up then we assume + * protected mode is not supported on this platform. 
*/ + dev_info(kbdev->dev, "Protected mode not available\n"); + return 0; + } + + pdev = of_find_device_by_node(protected_node); + if (!pdev) + return -EINVAL; + + protected_dev = platform_get_drvdata(pdev); + if (!protected_dev) + return -EPROBE_DEFER; + + kbdev->protected_ops = &protected_dev->ops; + kbdev->protected_dev = protected_dev; + + if (kbdev->protected_ops) { + int err; + + /* Make sure protected mode is disabled on startup */ + mutex_lock(&kbdev->pm.lock); + err = kbdev->protected_ops->protected_mode_disable( + kbdev->protected_dev); + mutex_unlock(&kbdev->pm.lock); + + /* protected_mode_disable() returns -EINVAL if not supported */ + kbdev->protected_mode_support = (err != -EINVAL); + } +#endif + return 0; +} + +static void kbasep_protected_mode_term(struct kbase_device *kbdev) +{ + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) + kfree(kbdev->protected_dev); +} + +#ifdef CONFIG_MALI_NO_MALI +static int kbase_common_reg_map(struct kbase_device *kbdev) +{ + return 0; +} +static void kbase_common_reg_unmap(struct kbase_device * const kbdev) +{ +} +#else /* CONFIG_MALI_NO_MALI */ +static int kbase_common_reg_map(struct kbase_device *kbdev) +{ + int err = 0; + + if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) { + dev_err(kbdev->dev, "Register window unavailable\n"); + err = -EIO; + goto out_region; + } + + kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size); + if (!kbdev->reg) { + dev_err(kbdev->dev, "Can't remap register window\n"); + err = -EINVAL; + goto out_ioremap; + } + + return err; + + out_ioremap: + release_mem_region(kbdev->reg_start, kbdev->reg_size); + out_region: + return err; +} + +static void kbase_common_reg_unmap(struct kbase_device * const kbdev) +{ + if (kbdev->reg) { + iounmap(kbdev->reg); + release_mem_region(kbdev->reg_start, kbdev->reg_size); + kbdev->reg = NULL; + kbdev->reg_start = 0; + kbdev->reg_size = 0; + } +} +#endif /* CONFIG_MALI_NO_MALI */ + +static int registers_map(struct kbase_device * const kbdev) +{ + + /* the first memory resource is the physical address of the GPU + * registers */ + struct platform_device *pdev = to_platform_device(kbdev->dev); + struct resource *reg_res; + int err; + + reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!reg_res) { + dev_err(kbdev->dev, "Invalid register resource\n"); + return -ENOENT; + } + + kbdev->reg_start = reg_res->start; + kbdev->reg_size = resource_size(reg_res); + + err = kbase_common_reg_map(kbdev); + if (err) { + dev_err(kbdev->dev, "Failed to map registers\n"); + return err; + } + + return 0; +} + +static void registers_unmap(struct kbase_device *kbdev) +{ + kbase_common_reg_unmap(kbdev); +} + +static int power_control_init(struct platform_device *pdev) +{ + struct kbase_device *kbdev = to_kbase_device(&pdev->dev); + int err = 0; + + if (!kbdev) + return -ENODEV; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_REGULATOR) + kbdev->regulator = regulator_get_optional(kbdev->dev, "mali"); + if (IS_ERR_OR_NULL(kbdev->regulator)) { + err = PTR_ERR(kbdev->regulator); + kbdev->regulator = NULL; + if (err == -EPROBE_DEFER) { + dev_err(&pdev->dev, "Failed to get regulator\n"); + return err; + } + dev_info(kbdev->dev, + "Continuing without Mali regulator control\n"); + /* Allow probe to continue without regulator */ + } +#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ + + kbdev->clock = clk_get(kbdev->dev, "core"); + if (IS_ERR_OR_NULL(kbdev->clock)) { + err = PTR_ERR(kbdev->clock); + 
kbdev->clock = NULL; + if (err == -EPROBE_DEFER) { + dev_err(&pdev->dev, "Failed to get clock\n"); + goto fail; + } + dev_info(kbdev->dev, "Continuing without Mali clock control\n"); + /* Allow probe to continue without clock. */ + } else { + err = clk_prepare_enable(kbdev->clock); + if (err) { + dev_err(kbdev->dev, + "Failed to prepare and enable clock (%d)\n", + err); + goto fail; + } + } + +#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP) + /* Register the OPPs if they are available in device tree */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \ + || defined(LSK_OPPV2_BACKPORT) + err = dev_pm_opp_of_add_table(kbdev->dev); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + err = of_init_opp_table(kbdev->dev); +#else + err = 0; +#endif /* LINUX_VERSION_CODE */ + if (err) + dev_dbg(kbdev->dev, "OPP table not found\n"); +#endif /* CONFIG_OF && CONFIG_PM_OPP */ + + return 0; + +fail: + +if (kbdev->clock != NULL) { + clk_put(kbdev->clock); + kbdev->clock = NULL; +} + +#ifdef CONFIG_REGULATOR + if (NULL != kbdev->regulator) { + regulator_put(kbdev->regulator); + kbdev->regulator = NULL; + } +#endif + + return err; +} + +static void power_control_term(struct kbase_device *kbdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + defined(LSK_OPPV2_BACKPORT) + dev_pm_opp_of_remove_table(kbdev->dev); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + of_free_opp_table(kbdev->dev); +#endif + + if (kbdev->clock) { + clk_disable_unprepare(kbdev->clock); + clk_put(kbdev->clock); + kbdev->clock = NULL; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \ + && defined(CONFIG_REGULATOR) + if (kbdev->regulator) { + regulator_put(kbdev->regulator); + kbdev->regulator = NULL; + } +#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */ +} + +#ifdef CONFIG_DEBUG_FS + +#if KBASE_GPU_RESET_EN +#include + +static void trigger_quirks_reload(struct kbase_device *kbdev) +{ + kbase_pm_context_active(kbdev); + if (kbase_prepare_to_reset_gpu(kbdev)) + kbase_reset_gpu(kbdev); + kbase_pm_context_idle(kbdev); +} + +#define MAKE_QUIRK_ACCESSORS(type) \ +static int type##_quirks_set(void *data, u64 val) \ +{ \ + struct kbase_device *kbdev; \ + kbdev = (struct kbase_device *)data; \ + kbdev->hw_quirks_##type = (u32)val; \ + trigger_quirks_reload(kbdev); \ + return 0;\ +} \ +\ +static int type##_quirks_get(void *data, u64 *val) \ +{ \ + struct kbase_device *kbdev;\ + kbdev = (struct kbase_device *)data;\ + *val = kbdev->hw_quirks_##type;\ + return 0;\ +} \ +DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\ + type##_quirks_set, "%llu\n") + +MAKE_QUIRK_ACCESSORS(sc); +MAKE_QUIRK_ACCESSORS(tiler); +MAKE_QUIRK_ACCESSORS(mmu); +MAKE_QUIRK_ACCESSORS(jm); + +#endif /* KBASE_GPU_RESET_EN */ + +/** + * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read + * @file: File object to read is for + * @buf: User buffer to populate with data + * @len: Length of user buffer + * @ppos: Offset within file object + * + * Retrieves the current status of protected debug mode + * (0 = disabled, 1 = enabled) + * + * Return: Number of bytes added to user buffer + */ +static ssize_t debugfs_protected_debug_mode_read(struct file *file, + char __user *buf, size_t len, loff_t *ppos) +{ + struct kbase_device *kbdev = (struct kbase_device *)file->private_data; + u32 gpu_status; + ssize_t ret_val; + + kbase_pm_context_active(kbdev); + gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL); + kbase_pm_context_idle(kbdev); + + if (gpu_status & GPU_DBGEN) + 
ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2); + else + ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2); + + return ret_val; +} + +/* + * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops + * + * Contains the file operations for the "protected_debug_mode" debugfs file + */ +static const struct file_operations fops_protected_debug_mode = { + .open = simple_open, + .read = debugfs_protected_debug_mode_read, + .llseek = default_llseek, +}; + +static int kbase_device_debugfs_init(struct kbase_device *kbdev) +{ + struct dentry *debugfs_ctx_defaults_directory; + int err; + + kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname, + NULL); + if (!kbdev->mali_debugfs_directory) { + dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n"); + err = -ENOMEM; + goto out; + } + + kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx", + kbdev->mali_debugfs_directory); + if (!kbdev->debugfs_ctx_directory) { + dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n"); + err = -ENOMEM; + goto out; + } + + debugfs_ctx_defaults_directory = debugfs_create_dir("defaults", + kbdev->debugfs_ctx_directory); + if (!debugfs_ctx_defaults_directory) { + dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n"); + err = -ENOMEM; + goto out; + } + +#if !MALI_CUSTOMER_RELEASE + kbasep_regs_dump_debugfs_init(kbdev); +#endif /* !MALI_CUSTOMER_RELEASE */ + kbasep_regs_history_debugfs_init(kbdev); + + kbase_debug_job_fault_debugfs_init(kbdev); + kbasep_gpu_memory_debugfs_init(kbdev); + kbase_as_fault_debugfs_init(kbdev); +#if KBASE_GPU_RESET_EN + /* fops_* variables created by invocations of macro + * MAKE_QUIRK_ACCESSORS() above. */ + debugfs_create_file("quirks_sc", 0644, + kbdev->mali_debugfs_directory, kbdev, + &fops_sc_quirks); + debugfs_create_file("quirks_tiler", 0644, + kbdev->mali_debugfs_directory, kbdev, + &fops_tiler_quirks); + debugfs_create_file("quirks_mmu", 0644, + kbdev->mali_debugfs_directory, kbdev, + &fops_mmu_quirks); + debugfs_create_file("quirks_jm", 0644, + kbdev->mali_debugfs_directory, kbdev, + &fops_jm_quirks); +#endif /* KBASE_GPU_RESET_EN */ + + debugfs_create_bool("infinite_cache", 0644, + debugfs_ctx_defaults_directory, + &kbdev->infinite_cache_active_default); + + debugfs_create_size_t("mem_pool_max_size", 0644, + debugfs_ctx_defaults_directory, + &kbdev->mem_pool_max_size_default); + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) { + debugfs_create_file("protected_debug_mode", S_IRUGO, + kbdev->mali_debugfs_directory, kbdev, + &fops_protected_debug_mode); + } + +#if KBASE_TRACE_ENABLE + kbasep_trace_debugfs_init(kbdev); +#endif /* KBASE_TRACE_ENABLE */ + +#ifdef CONFIG_MALI_TRACE_TIMELINE + kbasep_trace_timeline_debugfs_init(kbdev); +#endif /* CONFIG_MALI_TRACE_TIMELINE */ + +#ifdef CONFIG_MALI_DEVFREQ +#ifdef CONFIG_DEVFREQ_THERMAL + if (kbdev->inited_subsys & inited_devfreq) + kbase_ipa_debugfs_init(kbdev); +#endif /* CONFIG_DEVFREQ_THERMAL */ +#endif /* CONFIG_MALI_DEVFREQ */ + +#ifdef CONFIG_DEBUG_FS + debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR, + kbdev->mali_debugfs_directory, kbdev, + &kbasep_serialize_jobs_debugfs_fops); +#endif /* CONFIG_DEBUG_FS */ + + return 0; + +out: + debugfs_remove_recursive(kbdev->mali_debugfs_directory); + return err; +} + +static void kbase_device_debugfs_term(struct kbase_device *kbdev) +{ + debugfs_remove_recursive(kbdev->mali_debugfs_directory); +} + +#else /* CONFIG_DEBUG_FS */ +static inline int 
kbase_device_debugfs_init(struct kbase_device *kbdev) +{ + return 0; +} + +static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { } +#endif /* CONFIG_DEBUG_FS */ + +static void kbase_device_coherency_init(struct kbase_device *kbdev, + unsigned prod_id) +{ +#ifdef CONFIG_OF + u32 supported_coherency_bitmap = + kbdev->gpu_props.props.raw_props.coherency_mode; + const void *coherency_override_dts; + u32 override_coherency; + + /* Only for tMIx : + * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly + * documented for tMIx so force correct value here. + */ + if (GPU_ID_IS_NEW_FORMAT(prod_id) && + (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == + GPU_ID2_PRODUCT_TMIX)) + if (supported_coherency_bitmap == + COHERENCY_FEATURE_BIT(COHERENCY_ACE)) + supported_coherency_bitmap |= + COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE); + +#endif /* CONFIG_OF */ + + kbdev->system_coherency = COHERENCY_NONE; + + /* device tree may override the coherency */ +#ifdef CONFIG_OF + coherency_override_dts = of_get_property(kbdev->dev->of_node, + "system-coherency", + NULL); + if (coherency_override_dts) { + + override_coherency = be32_to_cpup(coherency_override_dts); + + if ((override_coherency <= COHERENCY_NONE) && + (supported_coherency_bitmap & + COHERENCY_FEATURE_BIT(override_coherency))) { + + kbdev->system_coherency = override_coherency; + + dev_info(kbdev->dev, + "Using coherency mode %u set from dtb", + override_coherency); + } else + dev_warn(kbdev->dev, + "Ignoring unsupported coherency mode %u set from dtb", + override_coherency); + } + +#endif /* CONFIG_OF */ + + kbdev->gpu_props.props.raw_props.coherency_mode = + kbdev->system_coherency; +} + +#ifdef CONFIG_MALI_FPGA_BUS_LOGGER + +/* Callback used by the kbase bus logger client, to initiate a GPU reset + * when the bus log is restarted. GPU reset is used as reference point + * in HW bus log analyses. 
+ */ +static void kbase_logging_started_cb(void *data) +{ + struct kbase_device *kbdev = (struct kbase_device *)data; + + if (kbase_prepare_to_reset_gpu(kbdev)) + kbase_reset_gpu(kbdev); + dev_info(kbdev->dev, "KBASE - Bus logger restarted\n"); +} +#endif + +static struct attribute *kbase_attrs[] = { +#ifdef CONFIG_MALI_DEBUG + &dev_attr_debug_command.attr, + &dev_attr_js_softstop_always.attr, +#endif +#if !MALI_CUSTOMER_RELEASE + &dev_attr_force_replay.attr, +#endif + &dev_attr_js_timeouts.attr, + &dev_attr_soft_job_timeout.attr, + &dev_attr_gpuinfo.attr, + &dev_attr_dvfs_period.attr, + &dev_attr_pm_poweroff.attr, + &dev_attr_reset_timeout.attr, + &dev_attr_js_scheduling_period.attr, + &dev_attr_power_policy.attr, + &dev_attr_core_availability_policy.attr, + &dev_attr_core_mask.attr, + &dev_attr_mem_pool_size.attr, + &dev_attr_mem_pool_max_size.attr, + &dev_attr_lp_mem_pool_size.attr, + &dev_attr_lp_mem_pool_max_size.attr, + NULL +}; + +static const struct attribute_group kbase_attr_group = { + .attrs = kbase_attrs, +}; + +static int kbase_platform_device_remove(struct platform_device *pdev) +{ + struct kbase_device *kbdev = to_kbase_device(&pdev->dev); + const struct list_head *dev_list; + + if (!kbdev) + return -ENODEV; + + kfree(kbdev->gpu_props.prop_buffer); + +#ifdef CONFIG_MALI_FPGA_BUS_LOGGER + if (kbdev->inited_subsys & inited_buslogger) { + bl_core_client_unregister(kbdev->buslogger); + kbdev->inited_subsys &= ~inited_buslogger; + } +#endif + + + if (kbdev->inited_subsys & inited_dev_list) { + dev_list = kbase_dev_list_get(); + list_del(&kbdev->entry); + kbase_dev_list_put(dev_list); + kbdev->inited_subsys &= ~inited_dev_list; + } + + if (kbdev->inited_subsys & inited_misc_register) { + misc_deregister(&kbdev->mdev); + kbdev->inited_subsys &= ~inited_misc_register; + } + + if (kbdev->inited_subsys & inited_sysfs_group) { + sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group); + kbdev->inited_subsys &= ~inited_sysfs_group; + } + + if (kbdev->inited_subsys & inited_get_device) { + put_device(kbdev->dev); + kbdev->inited_subsys &= ~inited_get_device; + } + + if (kbdev->inited_subsys & inited_debugfs) { + kbase_device_debugfs_term(kbdev); + kbdev->inited_subsys &= ~inited_debugfs; + } + + if (kbdev->inited_subsys & inited_job_fault) { + kbase_debug_job_fault_dev_term(kbdev); + kbdev->inited_subsys &= ~inited_job_fault; + } + if (kbdev->inited_subsys & inited_vinstr) { + kbase_vinstr_term(kbdev->vinstr_ctx); + kbdev->inited_subsys &= ~inited_vinstr; + } + +#ifdef CONFIG_MALI_DEVFREQ + if (kbdev->inited_subsys & inited_devfreq) { + kbase_devfreq_term(kbdev); + kbdev->inited_subsys &= ~inited_devfreq; + } +#endif + + if (kbdev->inited_subsys & inited_backend_late) { + kbase_backend_late_term(kbdev); + kbdev->inited_subsys &= ~inited_backend_late; + } + + if (kbdev->inited_subsys & inited_tlstream) { + kbase_tlstream_term(); + kbdev->inited_subsys &= ~inited_tlstream; + } + + /* Bring job and mem sys to a halt before we continue termination */ + + if (kbdev->inited_subsys & inited_js) + kbasep_js_devdata_halt(kbdev); + + if (kbdev->inited_subsys & inited_mem) + kbase_mem_halt(kbdev); + + if (kbdev->inited_subsys & inited_protected) { + kbasep_protected_mode_term(kbdev); + kbdev->inited_subsys &= ~inited_protected; + } + + if (kbdev->inited_subsys & inited_js) { + kbasep_js_devdata_term(kbdev); + kbdev->inited_subsys &= ~inited_js; + } + + if (kbdev->inited_subsys & inited_mem) { + kbase_mem_term(kbdev); + kbdev->inited_subsys &= ~inited_mem; + } + + if (kbdev->inited_subsys & 
inited_ctx_sched) { + kbase_ctx_sched_term(kbdev); + kbdev->inited_subsys &= ~inited_ctx_sched; + } + + if (kbdev->inited_subsys & inited_device) { + kbase_device_term(kbdev); + kbdev->inited_subsys &= ~inited_device; + } + + if (kbdev->inited_subsys & inited_backend_early) { + kbase_backend_early_term(kbdev); + kbdev->inited_subsys &= ~inited_backend_early; + } + + if (kbdev->inited_subsys & inited_io_history) { + kbase_io_history_term(&kbdev->io_history); + kbdev->inited_subsys &= ~inited_io_history; + } + + if (kbdev->inited_subsys & inited_power_control) { + power_control_term(kbdev); + kbdev->inited_subsys &= ~inited_power_control; + } + + if (kbdev->inited_subsys & inited_registers_map) { + registers_unmap(kbdev); + kbdev->inited_subsys &= ~inited_registers_map; + } + +#ifdef CONFIG_MALI_NO_MALI + if (kbdev->inited_subsys & inited_gpu_device) { + gpu_device_destroy(kbdev); + kbdev->inited_subsys &= ~inited_gpu_device; + } +#endif /* CONFIG_MALI_NO_MALI */ + + if (kbdev->inited_subsys != 0) + dev_err(kbdev->dev, "Missing sub system termination\n"); + + kbase_device_free(kbdev); + + return 0; +} + + +/* Number of register accesses for the buffer that we allocate during + * initialization time. The buffer size can be changed later via debugfs. */ +#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512) + +static int kbase_platform_device_probe(struct platform_device *pdev) +{ + struct kbase_device *kbdev; + struct mali_base_gpu_core_props *core_props; + u32 gpu_id; + unsigned prod_id; + const struct list_head *dev_list; + int err = 0; + +#ifdef CONFIG_OF + err = kbase_platform_early_init(); + if (err) { + dev_err(&pdev->dev, "Early platform initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } +#endif + kbdev = kbase_device_alloc(); + if (!kbdev) { + dev_err(&pdev->dev, "Allocate device failed\n"); + kbase_platform_device_remove(pdev); + return -ENOMEM; + } + + kbdev->dev = &pdev->dev; + dev_set_drvdata(kbdev->dev, kbdev); + +#ifdef CONFIG_MALI_NO_MALI + err = gpu_device_create(kbdev); + if (err) { + dev_err(&pdev->dev, "Dummy model initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_gpu_device; +#endif /* CONFIG_MALI_NO_MALI */ + + err = assign_irqs(pdev); + if (err) { + dev_err(&pdev->dev, "IRQ search failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + + err = registers_map(kbdev); + if (err) { + dev_err(&pdev->dev, "Register map failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_registers_map; + + err = power_control_init(pdev); + if (err) { + dev_err(&pdev->dev, "Power control initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_power_control; + + err = kbase_io_history_init(&kbdev->io_history, + KBASEP_DEFAULT_REGISTER_HISTORY_SIZE); + if (err) { + dev_err(&pdev->dev, "Register access history initialization failed\n"); + kbase_platform_device_remove(pdev); + return -ENOMEM; + } + kbdev->inited_subsys |= inited_io_history; + + err = kbase_backend_early_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Early backend initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_backend_early; + + scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name, + kbase_dev_nr); + + kbase_disjoint_init(kbdev); + + /* obtain min/max configured gpu frequencies */ + core_props = &(kbdev->gpu_props.props.core_props); + 
core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN; + core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX; + + err = kbase_device_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Device initialization failed (%d)\n", err); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_device; + + err = kbase_ctx_sched_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n", + err); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_ctx_sched; + + err = kbase_mem_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Memory subsystem initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_mem; + + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + gpu_id &= GPU_ID_VERSION_PRODUCT_ID; + prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT; + + kbase_device_coherency_init(kbdev, prod_id); + + err = kbasep_protected_mode_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_protected; + + dev_list = kbase_dev_list_get(); + list_add(&kbdev->entry, &kbase_dev_list); + kbase_dev_list_put(dev_list); + kbdev->inited_subsys |= inited_dev_list; + + err = kbasep_js_devdata_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Job JS devdata initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_js; + + err = kbase_tlstream_init(); + if (err) { + dev_err(kbdev->dev, "Timeline stream initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_tlstream; + + err = kbase_backend_late_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Late backend initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_backend_late; + + /* Initialize the kctx list. This is used by vinstr. */ + mutex_init(&kbdev->kctx_list_lock); + INIT_LIST_HEAD(&kbdev->kctx_list); + + kbdev->vinstr_ctx = kbase_vinstr_init(kbdev); + if (!kbdev->vinstr_ctx) { + dev_err(kbdev->dev, + "Virtual instrumentation initialization failed\n"); + kbase_platform_device_remove(pdev); + return -EINVAL; + } + kbdev->inited_subsys |= inited_vinstr; + +#ifdef CONFIG_MALI_DEVFREQ + /* Devfreq uses vinstr, so must be initialized after it. 
*/ + err = kbase_devfreq_init(kbdev); + if (!err) + kbdev->inited_subsys |= inited_devfreq; + else + dev_err(kbdev->dev, "Continuing without devfreq\n"); +#endif /* CONFIG_MALI_DEVFREQ */ + + err = kbase_debug_job_fault_dev_init(kbdev); + if (err) { + dev_err(kbdev->dev, "Job fault debug initialization failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_job_fault; + + err = kbase_device_debugfs_init(kbdev); + if (err) { + dev_err(kbdev->dev, "DebugFS initialization failed"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_debugfs; + + kbdev->mdev.minor = MISC_DYNAMIC_MINOR; + kbdev->mdev.name = kbdev->devname; + kbdev->mdev.fops = &kbase_fops; + kbdev->mdev.parent = get_device(kbdev->dev); + kbdev->inited_subsys |= inited_get_device; + + /* This needs to happen before registering the device with misc_register(), + * otherwise it causes a race condition between registering the device and a + * uevent event being generated for userspace, causing udev rules to run + * which might expect certain sysfs attributes present. As a result of the + * race condition we avoid, some Mali sysfs entries may have appeared to + * udev to not exist. + + * For more information, see + * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the + * paragraph that starts with "Word of warning", currently the second-last + * paragraph. + */ + err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group); + if (err) { + dev_err(&pdev->dev, "SysFS group creation failed\n"); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_sysfs_group; + + err = misc_register(&kbdev->mdev); + if (err) { + dev_err(kbdev->dev, "Misc device registration failed for %s\n", + kbdev->devname); + kbase_platform_device_remove(pdev); + return err; + } + kbdev->inited_subsys |= inited_misc_register; + + +#ifdef CONFIG_MALI_FPGA_BUS_LOGGER + err = bl_core_client_register(kbdev->devname, + kbase_logging_started_cb, + kbdev, &kbdev->buslogger, + THIS_MODULE, NULL); + if (err == 0) { + kbdev->inited_subsys |= inited_buslogger; + bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024); + } else { + dev_warn(kbdev->dev, "Bus log client registration failed\n"); + err = 0; + } +#endif + + err = kbase_gpuprops_populate_user_buffer(kbdev); + if (err) { + dev_err(&pdev->dev, "GPU property population failed"); + kbase_platform_device_remove(pdev); + return err; + } + + dev_info(kbdev->dev, + "Probed as %s\n", dev_name(kbdev->mdev.this_device)); + + kbase_dev_nr++; + + return err; +} + +#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE + +/** + * kbase_device_suspend - Suspend callback from the OS. + * + * This is called by Linux when the device should suspend. + * + * @dev: The device to suspend + * + * Return: A standard Linux error code + */ +static int kbase_device_suspend(struct device *dev) +{ + struct kbase_device *kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (kbdev->inited_subsys & inited_devfreq) + devfreq_suspend_device(kbdev->devfreq); +#endif + + kbase_pm_suspend(kbdev); + return 0; +} + +/** + * kbase_device_resume - Resume callback from the OS. + * + * This is called by Linux when the device should resume from suspension. 
+ * + * @dev: The device to resume + * + * Return: A standard Linux error code + */ +static int kbase_device_resume(struct device *dev) +{ + struct kbase_device *kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + kbase_pm_resume(kbdev); + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (kbdev->inited_subsys & inited_devfreq) + devfreq_resume_device(kbdev->devfreq); +#endif + return 0; +} + +/** + * kbase_device_runtime_suspend - Runtime suspend callback from the OS. + * + * This is called by Linux when the device should prepare for a condition in + * which it will not be able to communicate with the CPU(s) and RAM due to + * power management. + * + * @dev: The device to suspend + * + * Return: A standard Linux error code + */ +#ifdef KBASE_PM_RUNTIME +static int kbase_device_runtime_suspend(struct device *dev) +{ + struct kbase_device *kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (kbdev->inited_subsys & inited_devfreq) + devfreq_suspend_device(kbdev->devfreq); +#endif + + if (kbdev->pm.backend.callback_power_runtime_off) { + kbdev->pm.backend.callback_power_runtime_off(kbdev); + dev_dbg(dev, "runtime suspend\n"); + } + return 0; +} +#endif /* KBASE_PM_RUNTIME */ + +/** + * kbase_device_runtime_resume - Runtime resume callback from the OS. + * + * This is called by Linux when the device should go into a fully active state. + * + * @dev: The device to suspend + * + * Return: A standard Linux error code + */ + +#ifdef KBASE_PM_RUNTIME +static int kbase_device_runtime_resume(struct device *dev) +{ + int ret = 0; + struct kbase_device *kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + if (kbdev->pm.backend.callback_power_runtime_on) { + ret = kbdev->pm.backend.callback_power_runtime_on(kbdev); + dev_dbg(dev, "runtime resume\n"); + } + +#if defined(CONFIG_MALI_DEVFREQ) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (kbdev->inited_subsys & inited_devfreq) + devfreq_resume_device(kbdev->devfreq); +#endif + + return ret; +} +#endif /* KBASE_PM_RUNTIME */ + + +#ifdef KBASE_PM_RUNTIME +/** + * kbase_device_runtime_idle - Runtime idle callback from the OS. + * @dev: The device to suspend + * + * This is called by Linux when the device appears to be inactive and it might + * be placed into a low power state. + * + * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend, + * otherwise a standard Linux error code + */ +static int kbase_device_runtime_idle(struct device *dev) +{ + struct kbase_device *kbdev = to_kbase_device(dev); + + if (!kbdev) + return -ENODEV; + + /* Use platform specific implementation if it exists. */ + if (kbdev->pm.backend.callback_power_runtime_idle) + return kbdev->pm.backend.callback_power_runtime_idle(kbdev); + + return 0; +} +#endif /* KBASE_PM_RUNTIME */ + +/* The power management operations for the platform driver. 
+ */ +static const struct dev_pm_ops kbase_pm_ops = { + .suspend = kbase_device_suspend, + .resume = kbase_device_resume, +#ifdef KBASE_PM_RUNTIME + .runtime_suspend = kbase_device_runtime_suspend, + .runtime_resume = kbase_device_runtime_resume, + .runtime_idle = kbase_device_runtime_idle, +#endif /* KBASE_PM_RUNTIME */ +}; + +#ifdef CONFIG_OF +static const struct of_device_id kbase_dt_ids[] = { + { .compatible = "arm,malit6xx" }, + { .compatible = "arm,mali-t628" }, + { .compatible = "arm,mali-midgard" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, kbase_dt_ids); +#endif + +static struct platform_driver kbase_platform_driver = { + .probe = kbase_platform_device_probe, + .remove = kbase_platform_device_remove, + .driver = { + .name = kbase_drv_name, + .owner = THIS_MODULE, + .pm = &kbase_pm_ops, + .of_match_table = of_match_ptr(kbase_dt_ids), + }, +}; + +/* + * The driver will not provide a shortcut to create the Mali platform device + * anymore when using Device Tree. + */ +#ifdef CONFIG_OF +module_platform_driver(kbase_platform_driver); +#else + +static int __init kbase_driver_init(void) +{ + int ret; + + ret = kbase_platform_early_init(); + if (ret) + return ret; + + ret = kbase_platform_register(); + if (ret) + return ret; + + ret = platform_driver_register(&kbase_platform_driver); + + if (ret) + kbase_platform_unregister(); + + return ret; +} + +static void __exit kbase_driver_exit(void) +{ + platform_driver_unregister(&kbase_platform_driver); + kbase_platform_unregister(); +} + +module_init(kbase_driver_init); +module_exit(kbase_driver_exit); + +#endif /* CONFIG_OF */ + +MODULE_LICENSE("GPL"); +MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \ + __stringify(BASE_UK_VERSION_MAJOR) "." \ + __stringify(BASE_UK_VERSION_MINOR) ")"); + +#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE) +#define CREATE_TRACE_POINTS +#endif + +#ifdef CONFIG_MALI_GATOR_SUPPORT +/* Create the trace points (otherwise we just get code to call a tracepoint) */ +#include "mali_linux_trace.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change); + +void kbase_trace_mali_pm_status(u32 event, u64 value) +{ + trace_mali_pm_status(event, value); +} + +void kbase_trace_mali_pm_power_off(u32 event, u64 value) +{ + trace_mali_pm_power_off(event, value); +} + +void kbase_trace_mali_pm_power_on(u32 event, u64 value) +{ + trace_mali_pm_power_on(event, value); +} + +void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id) +{ + trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? 
kctx->pid : 0), atom_id); +} + +void kbase_trace_mali_page_fault_insert_pages(int event, u32 value) +{ + trace_mali_page_fault_insert_pages(event, value); +} + +void kbase_trace_mali_mmu_as_in_use(int event) +{ + trace_mali_mmu_as_in_use(event); +} + +void kbase_trace_mali_mmu_as_released(int event) +{ + trace_mali_mmu_as_released(event); +} + +void kbase_trace_mali_total_alloc_pages_change(long long int event) +{ + trace_mali_total_alloc_pages_change(event); +} +#endif /* CONFIG_MALI_GATOR_SUPPORT */ +#ifdef CONFIG_MALI_SYSTEM_TRACE +#include "mali_linux_kbase_trace.h" +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c new file mode 100644 index 00000000000000..e2f7baabad43fd --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c @@ -0,0 +1,203 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include + +#include "mali_kbase_ctx_sched.h" + +int kbase_ctx_sched_init(struct kbase_device *kbdev) +{ + int as_present = (1U << kbdev->nr_hw_address_spaces) - 1; + + /* These two must be recalculated if nr_hw_address_spaces changes + * (e.g. for HW workarounds) */ + kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces; + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) { + bool use_workaround; + + use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE; + if (use_workaround) { + dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only"); + kbdev->nr_user_address_spaces = 1; + } + } + + kbdev->as_free = as_present; /* All ASs initially free */ + + memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx)); + + return 0; +} + +void kbase_ctx_sched_term(struct kbase_device *kbdev) +{ + s8 i; + + /* Sanity checks */ + for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) { + WARN_ON(kbdev->as_to_kctx[i] != NULL); + WARN_ON(!(kbdev->as_free & (1u << i))); + } +} + +/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space + * + * @kbdev: The context for which to find a free address space + * + * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID + * + * This function returns an address space available for use. It would prefer + * returning an AS that has been previously assigned to the context to + * avoid having to reprogram the MMU. + */ +static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx) +{ + struct kbase_device *const kbdev = kctx->kbdev; + int free_as; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + /* First check if the previously assigned AS is available */ + if ((kctx->as_nr != KBASEP_AS_NR_INVALID) && + (kbdev->as_free & (1u << kctx->as_nr))) + return kctx->as_nr; + + /* The previously assigned AS was taken, we'll be returning any free + * AS at this point. 
+ */ + free_as = ffs(kbdev->as_free) - 1; + if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces) + return free_as; + + return KBASEP_AS_NR_INVALID; +} + +int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx) +{ + struct kbase_device *const kbdev = kctx->kbdev; + + lockdep_assert_held(&kbdev->mmu_hw_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ON(!kbdev->pm.backend.gpu_powered); + + if (atomic_inc_return(&kctx->refcount) == 1) { + int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx); + + if (free_as != KBASEP_AS_NR_INVALID) { + kbdev->as_free &= ~(1u << free_as); + /* Only program the MMU if the context has not been + * assigned the same address space before. + */ + if (free_as != kctx->as_nr) { + struct kbase_context *const prev_kctx = + kbdev->as_to_kctx[free_as]; + + if (prev_kctx) { + WARN_ON(atomic_read(&prev_kctx->refcount) != 0); + kbase_mmu_disable(prev_kctx); + prev_kctx->as_nr = KBASEP_AS_NR_INVALID; + } + + kctx->as_nr = free_as; + kbdev->as_to_kctx[free_as] = kctx; + kbase_mmu_update(kctx); + } + } else { + atomic_dec(&kctx->refcount); + + /* Failed to find an available address space, we must + * be returning an error at this point. + */ + WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID); + } + } + + return kctx->as_nr; +} + +void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx) +{ + struct kbase_device *const kbdev = kctx->kbdev; + + lockdep_assert_held(&kbdev->hwaccess_lock); + WARN_ON(atomic_read(&kctx->refcount) == 0); + WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID); + WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx); + + atomic_inc(&kctx->refcount); +} + +void kbase_ctx_sched_release_ctx(struct kbase_context *kctx) +{ + struct kbase_device *const kbdev = kctx->kbdev; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (atomic_dec_return(&kctx->refcount) == 0) + kbdev->as_free |= (1u << kctx->as_nr); +} + +void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx) +{ + struct kbase_device *const kbdev = kctx->kbdev; + + lockdep_assert_held(&kbdev->mmu_hw_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ON(atomic_read(&kctx->refcount) != 0); + + if (kctx->as_nr != KBASEP_AS_NR_INVALID) { + if (kbdev->pm.backend.gpu_powered) + kbase_mmu_disable(kctx); + + kbdev->as_to_kctx[kctx->as_nr] = NULL; + kctx->as_nr = KBASEP_AS_NR_INVALID; + } +} + +void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev) +{ + s8 i; + + lockdep_assert_held(&kbdev->mmu_hw_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ON(!kbdev->pm.backend.gpu_powered); + + for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) { + struct kbase_context *kctx; + + kctx = kbdev->as_to_kctx[i]; + if (kctx) { + if (atomic_read(&kctx->refcount)) { + WARN_ON(kctx->as_nr != i); + + kbase_mmu_update(kctx); + } else { + /* This context might have been assigned an + * AS before, clear it. + */ + kbdev->as_to_kctx[kctx->as_nr] = NULL; + kctx->as_nr = KBASEP_AS_NR_INVALID; + } + } else { + kbase_mmu_disable_as(kbdev, i); + } + } +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h new file mode 100644 index 00000000000000..77d2232d9e1ac0 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h @@ -0,0 +1,131 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_CTX_SCHED_H_ +#define _KBASE_CTX_SCHED_H_ + +#include + +/* The Context Scheduler manages address space assignment and reference + * counting to kbase_context. The interface has been designed to minimise + * interactions between the Job Scheduler and Power Management/MMU to support + * the existing Job Scheduler interface. + * + * The initial implementation of the Context Scheduler does not schedule + * contexts. Instead it relies on the Job Scheduler to make decisions of + * when to schedule/evict contexts if address spaces are starved. In the + * future, once an interface between the CS and JS has been devised to + * provide enough information about how each context is consuming GPU resources, + * those decisions can be made in the CS itself, thereby reducing duplicated + * code. + */ + +/* base_ctx_sched_init - Initialise the context scheduler + * + * @kbdev: The device for which the context scheduler needs to be + * initialised + * + * Return: 0 for success, otherwise failure + * + * This must be called during device initilisation. The number of hardware + * address spaces must already be established before calling this function. + */ +int kbase_ctx_sched_init(struct kbase_device *kbdev); + +/* base_ctx_sched_term - Terminate the context scheduler + * + * @kbdev: The device for which the context scheduler needs to be + * terminated + * + * This must be called during device termination after all contexts have been + * destroyed. + */ +void kbase_ctx_sched_term(struct kbase_device *kbdev); + +/* kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context + * + * @kctx: The context to which to retain a reference + * + * Return: The address space that the context has been assigned to or + * KBASEP_AS_NR_INVALID if no address space was available. + * + * This function should be called whenever an address space should be assigned + * to a context and programmed onto the MMU. It should typically be called + * when jobs are ready to be submitted to the GPU. + * + * It can be called as many times as necessary. The address space will be + * assigned to the context for as long as there is a reference to said context. + * + * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be + * held whilst calling this function. + */ +int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx); + +/* kbase_ctx_sched_retain_ctx_refcount + * + * @kctx: The context to which to retain a reference + * + * This function only retains a reference to the context. It must be called + * only when the context already has a reference. + * + * This is typically called inside an atomic session where we know the context + * is already scheduled in but want to take an extra reference to ensure that + * it doesn't get descheduled. 
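+ *
+ * A minimal usage sketch (illustrative only, not taken from this patch; the
+ * caller provides kbdev, a kctx that already holds at least one reference,
+ * and an unsigned long flags for the spinlock):
+ *
+ *   spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *   kbase_ctx_sched_retain_ctx_refcount(kctx);
+ *   spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *
+ *   (work with kctx, knowing it cannot be descheduled)
+ *
+ *   spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *   kbase_ctx_sched_release_ctx(kctx);
+ *   spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);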
+ * + * The kbase_device::hwaccess_lock must be held whilst calling this function + */ +void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx); + +/* kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context + * + * @kctx: The context from which to release a reference + * + * This function should be called whenever an address space could be unassigned + * from a context. When there are no more references to said context, the + * address space previously assigned to this context shall be reassigned to + * other contexts as needed. + * + * The kbase_device::hwaccess_lock must be held whilst calling this function + */ +void kbase_ctx_sched_release_ctx(struct kbase_context *kctx); + +/* kbase_ctx_sched_remove_ctx - Unassign previously assigned address space + * + * @kctx: The context to be removed + * + * This function should be called when a context is being destroyed. The + * context must no longer have any reference. If it has been assigned an + * address space before then the AS will be unprogrammed. + * + * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be + * held whilst calling this function. + */ +void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx); + +/* kbase_ctx_sched_restore_all_as - Reprogram all address spaces + * + * @kbdev: The device for which address spaces to be reprogrammed + * + * This function shall reprogram all address spaces previously assigned to + * contexts. It can be used after the GPU is reset. + * + * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be + * held whilst calling this function. + */ +void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev); + +#endif /* _KBASE_CTX_SCHED_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.c b/drivers/gpu/arm/midgard/mali_kbase_debug.c new file mode 100644 index 00000000000000..fb57ac2e31adb8 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_debug.c @@ -0,0 +1,39 @@ +/* + * + * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include + +static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = { + NULL, + NULL +}; + +void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param) +{ + kbasep_debug_assert_registered_cb.func = func; + kbasep_debug_assert_registered_cb.param = param; +} + +void kbasep_debug_assert_call_hook(void) +{ + if (kbasep_debug_assert_registered_cb.func != NULL) + kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param); +} +KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook); + diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.h b/drivers/gpu/arm/midgard/mali_kbase_debug.h new file mode 100644 index 00000000000000..d7873c5eabf93a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_debug.h @@ -0,0 +1,164 @@ +/* + * + * (C) COPYRIGHT 2012-2015, 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#ifndef _KBASE_DEBUG_H +#define _KBASE_DEBUG_H + +#include + +/** @brief If equals to 0, a trace containing the file, line, and function will be displayed before each message. */ +#define KBASE_DEBUG_SKIP_TRACE 0 + +/** @brief If different from 0, the trace will only contain the file and line. */ +#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0 + +/** @brief Disable the asserts tests if set to 1. Default is to disable the asserts in release. */ +#ifndef KBASE_DEBUG_DISABLE_ASSERTS +#ifdef CONFIG_MALI_DEBUG +#define KBASE_DEBUG_DISABLE_ASSERTS 0 +#else +#define KBASE_DEBUG_DISABLE_ASSERTS 1 +#endif +#endif /* KBASE_DEBUG_DISABLE_ASSERTS */ + +/** Function type that is called on an KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */ +typedef void (kbase_debug_assert_hook) (void *); + +struct kbasep_debug_assert_cb { + kbase_debug_assert_hook *func; + void *param; +}; + +/** + * @def KBASEP_DEBUG_PRINT_TRACE + * @brief Private macro containing the format of the trace to display before every message + * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME + */ +#if !KBASE_DEBUG_SKIP_TRACE +#define KBASEP_DEBUG_PRINT_TRACE \ + "In file: " __FILE__ " line: " CSTD_STR2(__LINE__) +#if !KBASE_DEBUG_SKIP_FUNCTION_NAME +#define KBASEP_DEBUG_PRINT_FUNCTION __func__ +#else +#define KBASEP_DEBUG_PRINT_FUNCTION "" +#endif +#else +#define KBASEP_DEBUG_PRINT_TRACE "" +#endif + +/** + * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) + * @brief (Private) system printing function associated to the @ref KBASE_DEBUG_ASSERT_MSG event. + * @param trace location in the code from where the message is printed + * @param function function from where the message is printed + * @param ... Format string followed by format arguments. + * @note function parameter cannot be concatenated with other strings + */ +/* Select the correct system output function*/ +#ifdef CONFIG_MALI_DEBUG +#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\ + do { \ + pr_err("Mali: %s function:%s ", trace, function);\ + pr_err(__VA_ARGS__);\ + pr_err("\n");\ + } while (false) +#else +#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) CSTD_NOP() +#endif + +#ifdef CONFIG_MALI_DEBUG +#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook() +#else +#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP() +#endif + +/** + * @def KBASE_DEBUG_ASSERT(expr) + * @brief Calls @ref KBASE_PRINT_ASSERT and prints the expression @a expr if @a expr is false + * + * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1 + * + * @param expr Boolean expression + */ +#define KBASE_DEBUG_ASSERT(expr) \ + KBASE_DEBUG_ASSERT_MSG(expr, #expr) + +#if KBASE_DEBUG_DISABLE_ASSERTS +#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP() +#else + /** + * @def KBASE_DEBUG_ASSERT_MSG(expr, ...) + * @brief Calls @ref KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false + * + * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1 + * + * @param expr Boolean expression + * @param ... 
Message to display when @a expr is false, as a format string followed by format arguments. + */ +#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \ + do { \ + if (!(expr)) { \ + KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\ + KBASE_CALL_ASSERT_HOOK();\ + BUG();\ + } \ + } while (false) +#endif /* KBASE_DEBUG_DISABLE_ASSERTS */ + +/** + * @def KBASE_DEBUG_CODE( X ) + * @brief Executes the code inside the macro only in debug mode + * + * @param X Code to compile only in debug mode. + */ +#ifdef CONFIG_MALI_DEBUG +#define KBASE_DEBUG_CODE(X) X +#else +#define KBASE_DEBUG_CODE(X) CSTD_NOP() +#endif /* CONFIG_MALI_DEBUG */ + +/** @} */ + +/** + * @brief Register a function to call on ASSERT + * + * Such functions will \b only be called during Debug mode, and for debugging + * features \b only. Do not rely on them to be called in general use. + * + * To disable the hook, supply NULL to \a func. + * + * @note This function is not thread-safe, and should only be used to + * register/deregister once in the module's lifetime. + * + * @param[in] func the function to call when an assert is triggered. + * @param[in] param the parameter to pass to \a func when calling it + */ +void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param); + +/** + * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook() + * + * @note This function is not thread-safe with respect to multiple threads + * registering functions and parameters with + * kbase_debug_assert_register_hook(). Otherwise, thread safety is the + * responsibility of the registered hook. + */ +void kbasep_debug_assert_call_hook(void); + +#endif /* _KBASE_DEBUG_H */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c new file mode 100644 index 00000000000000..f29430ddf8f9bd --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c @@ -0,0 +1,499 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS + +static bool kbase_is_job_fault_event_pending(struct kbase_device *kbdev) +{ + struct list_head *event_list = &kbdev->job_fault_event_list; + unsigned long flags; + bool ret; + + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + ret = !list_empty(event_list); + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + + return ret; +} + +static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx) +{ + struct kbase_device *kbdev = kctx->kbdev; + struct list_head *event_list = &kctx->kbdev->job_fault_event_list; + struct base_job_fault_event *event; + unsigned long flags; + + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + if (list_empty(event_list)) { + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + return true; + } + list_for_each_entry(event, event_list, head) { + if (event->katom->kctx == kctx) { + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, + flags); + return false; + } + } + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + return true; +} + +/* wait until the fault happen and copy the event */ +static int kbase_job_fault_event_wait(struct kbase_device *kbdev, + struct base_job_fault_event *event) +{ + struct list_head *event_list = &kbdev->job_fault_event_list; + struct base_job_fault_event *event_in; + unsigned long flags; + + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + if (list_empty(event_list)) { + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + if (wait_event_interruptible(kbdev->job_fault_wq, + kbase_is_job_fault_event_pending(kbdev))) + return -ERESTARTSYS; + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + } + + event_in = list_entry(event_list->next, + struct base_job_fault_event, head); + event->event_code = event_in->event_code; + event->katom = event_in->katom; + + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + + return 0; + +} + +/* remove the event from the queue */ +static struct base_job_fault_event *kbase_job_fault_event_dequeue( + struct kbase_device *kbdev, struct list_head *event_list) +{ + struct base_job_fault_event *event; + + event = list_entry(event_list->next, + struct base_job_fault_event, head); + list_del(event_list->next); + + return event; + +} + +/* Remove all the following atoms after the failed atom in the same context + * Call the postponed bottom half of job done. + * Then, this context could be rescheduled. 
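+ * Each queued atom is dequeued from the context's
+ * job_fault_resume_event_list and its postponed kbase_jd_done_worker() is
+ * run, so the atom is reported as complete.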
+ */
+static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
+{
+	struct list_head *event_list = &kctx->job_fault_resume_event_list;
+
+	while (!list_empty(event_list)) {
+		struct base_job_fault_event *event;
+
+		event = kbase_job_fault_event_dequeue(kctx->kbdev,
+				&kctx->job_fault_resume_event_list);
+		kbase_jd_done_worker(&event->katom->work);
+	}
+
+}
+
+/* Remove all the failed atoms that belong to different contexts
+ * Resume all the contexts that were suspended due to a failed job
+ */
+static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev)
+{
+	struct list_head *event_list = &kbdev->job_fault_event_list;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	while (!list_empty(event_list)) {
+		kbase_job_fault_event_dequeue(kbdev, event_list);
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		wake_up(&kbdev->job_fault_resume_wq);
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	}
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+}
+
+static void kbase_job_fault_resume_worker(struct work_struct *data)
+{
+	struct base_job_fault_event *event = container_of(data,
+			struct base_job_fault_event, job_fault_work);
+	struct kbase_context *kctx;
+	struct kbase_jd_atom *katom;
+
+	katom = event->katom;
+	kctx = katom->kctx;
+
+	dev_info(kctx->kbdev->dev, "Job dumping wait\n");
+
+	/* When it is woken up, it needs to check whether the queue is empty
+	 * or the failed atom belongs to a different context. Either case
+	 * means the failed job has been dumped. Please note, it should never
+	 * happen that the job_fault_event_list holds two atoms belonging to
+	 * the same context.
+	 */
+	wait_event(kctx->kbdev->job_fault_resume_wq,
+			 kbase_ctx_has_no_event_pending(kctx));
+
+	atomic_set(&kctx->job_fault_count, 0);
+	kbase_jd_done_worker(&katom->work);
+
+	/* In case the following atoms were scheduled during the failed job
+	 * dump, the job_done_worker was held.
We need to rerun it after the dump + * was finished + */ + kbase_job_fault_resume_event_cleanup(kctx); + + dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n"); +} + +static struct base_job_fault_event *kbase_job_fault_event_queue( + struct list_head *event_list, + struct kbase_jd_atom *atom, + u32 completion_code) +{ + struct base_job_fault_event *event; + + event = &atom->fault_event; + + event->katom = atom; + event->event_code = completion_code; + + list_add_tail(&event->head, event_list); + + return event; + +} + +static void kbase_job_fault_event_post(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, u32 completion_code) +{ + struct base_job_fault_event *event; + unsigned long flags; + + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list, + katom, completion_code); + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + + wake_up_interruptible(&kbdev->job_fault_wq); + + INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker); + queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work); + + dev_info(katom->kctx->kbdev->dev, "Job fault happen, start dump: %d_%d", + katom->kctx->tgid, katom->kctx->id); + +} + +/* + * This function will process the job fault + * Get the register copy + * Send the failed job dump event + * Create a Wait queue to wait until the job dump finish + */ + +bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom, + u32 completion_code) +{ + struct kbase_context *kctx = katom->kctx; + + /* Check if dumping is in the process + * only one atom of each context can be dumped at the same time + * If the atom belongs to different context, it can be dumped + */ + if (atomic_read(&kctx->job_fault_count) > 0) { + kbase_job_fault_event_queue( + &kctx->job_fault_resume_event_list, + katom, completion_code); + dev_info(kctx->kbdev->dev, "queue:%d\n", + kbase_jd_atom_id(kctx, katom)); + return true; + } + + if (kctx->kbdev->job_fault_debug == true) { + + if (completion_code != BASE_JD_EVENT_DONE) { + + if (kbase_job_fault_get_reg_snapshot(kctx) == false) { + dev_warn(kctx->kbdev->dev, "get reg dump failed\n"); + return false; + } + + kbase_job_fault_event_post(kctx->kbdev, katom, + completion_code); + atomic_inc(&kctx->job_fault_count); + dev_info(kctx->kbdev->dev, "post:%d\n", + kbase_jd_atom_id(kctx, katom)); + return true; + + } + } + return false; + +} + +static int debug_job_fault_show(struct seq_file *m, void *v) +{ + struct kbase_device *kbdev = m->private; + struct base_job_fault_event *event = (struct base_job_fault_event *)v; + struct kbase_context *kctx = event->katom->kctx; + int i; + + dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d", + kctx->tgid, kctx->id, event->reg_offset); + + if (kctx->reg_dump == NULL) { + dev_warn(kbdev->dev, "reg dump is NULL"); + return -1; + } + + if (kctx->reg_dump[event->reg_offset] == + REGISTER_DUMP_TERMINATION_FLAG) { + /* Return the error here to stop the read. And the + * following next() will not be called. 
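+	 * (Returning a negative value from a seq_file ->show callback ends
+	 * the traversal without calling ->next again.)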
The stop can + * get the real event resource and release it + */ + return -1; + } + + if (event->reg_offset == 0) + seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id); + + for (i = 0; i < 50; i++) { + if (kctx->reg_dump[event->reg_offset] == + REGISTER_DUMP_TERMINATION_FLAG) { + break; + } + seq_printf(m, "%08x: %08x\n", + kctx->reg_dump[event->reg_offset], + kctx->reg_dump[1+event->reg_offset]); + event->reg_offset += 2; + + } + + + return 0; +} +static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct kbase_device *kbdev = m->private; + struct base_job_fault_event *event = (struct base_job_fault_event *)v; + + dev_info(kbdev->dev, "debug job fault seq next:%d, %d", + event->reg_offset, (int)*pos); + + return event; +} + +static void *debug_job_fault_start(struct seq_file *m, loff_t *pos) +{ + struct kbase_device *kbdev = m->private; + struct base_job_fault_event *event; + + dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos); + + /* The condition is trick here. It needs make sure the + * fault hasn't happened and the dumping hasn't been started, + * or the dumping has finished + */ + if (*pos == 0) { + event = kmalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return NULL; + event->reg_offset = 0; + if (kbase_job_fault_event_wait(kbdev, event)) { + kfree(event); + return NULL; + } + + /* The cache flush workaround is called in bottom half of + * job done but we delayed it. Now we should clean cache + * earlier. Then the GPU memory dump should be correct. + */ + kbase_backend_cacheclean(kbdev, event->katom); + } else + return NULL; + + return event; +} + +static void debug_job_fault_stop(struct seq_file *m, void *v) +{ + struct kbase_device *kbdev = m->private; + + /* here we wake up the kbase_jd_done_worker after stop, it needs + * get the memory dump before the register dump in debug daemon, + * otherwise, the memory dump may be incorrect. + */ + + if (v != NULL) { + kfree(v); + dev_info(kbdev->dev, "debug job fault seq stop stage 1"); + + } else { + unsigned long flags; + + spin_lock_irqsave(&kbdev->job_fault_event_lock, flags); + if (!list_empty(&kbdev->job_fault_event_list)) { + kbase_job_fault_event_dequeue(kbdev, + &kbdev->job_fault_event_list); + wake_up(&kbdev->job_fault_resume_wq); + } + spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags); + dev_info(kbdev->dev, "debug job fault seq stop stage 2"); + } + +} + +static const struct seq_operations ops = { + .start = debug_job_fault_start, + .next = debug_job_fault_next, + .stop = debug_job_fault_stop, + .show = debug_job_fault_show, +}; + +static int debug_job_fault_open(struct inode *in, struct file *file) +{ + struct kbase_device *kbdev = in->i_private; + + seq_open(file, &ops); + + ((struct seq_file *)file->private_data)->private = kbdev; + dev_info(kbdev->dev, "debug job fault seq open"); + + kbdev->job_fault_debug = true; + + return 0; + +} + +static int debug_job_fault_release(struct inode *in, struct file *file) +{ + struct kbase_device *kbdev = in->i_private; + + seq_release(in, file); + + kbdev->job_fault_debug = false; + + /* Clean the unprocessed job fault. After that, all the suspended + * contexts could be rescheduled. 
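+	 * (kbase_job_fault_event_cleanup() below drains job_fault_event_list
+	 * and wakes job_fault_resume_wq so the blocked resume workers can
+	 * complete.)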
+ */ + kbase_job_fault_event_cleanup(kbdev); + + dev_info(kbdev->dev, "debug job fault seq close"); + + return 0; +} + +static const struct file_operations kbasep_debug_job_fault_fops = { + .open = debug_job_fault_open, + .read = seq_read, + .llseek = seq_lseek, + .release = debug_job_fault_release, +}; + +/* + * Initialize debugfs entry for job fault dump + */ +void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev) +{ + debugfs_create_file("job_fault", S_IRUGO, + kbdev->mali_debugfs_directory, kbdev, + &kbasep_debug_job_fault_fops); +} + + +int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev) +{ + + INIT_LIST_HEAD(&kbdev->job_fault_event_list); + + init_waitqueue_head(&(kbdev->job_fault_wq)); + init_waitqueue_head(&(kbdev->job_fault_resume_wq)); + spin_lock_init(&kbdev->job_fault_event_lock); + + kbdev->job_fault_resume_workq = alloc_workqueue( + "kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1); + if (!kbdev->job_fault_resume_workq) + return -ENOMEM; + + kbdev->job_fault_debug = false; + + return 0; +} + +/* + * Release the relevant resource per device + */ +void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev) +{ + destroy_workqueue(kbdev->job_fault_resume_workq); +} + + +/* + * Initialize the relevant data structure per context + */ +void kbase_debug_job_fault_context_init(struct kbase_context *kctx) +{ + + /* We need allocate double size register range + * Because this memory will keep the register address and value + */ + kctx->reg_dump = vmalloc(0x4000 * 2); + if (kctx->reg_dump == NULL) + return; + + if (kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000) == false) { + vfree(kctx->reg_dump); + kctx->reg_dump = NULL; + } + INIT_LIST_HEAD(&kctx->job_fault_resume_event_list); + atomic_set(&kctx->job_fault_count, 0); + +} + +/* + * release the relevant resource per context + */ +void kbase_debug_job_fault_context_term(struct kbase_context *kctx) +{ + vfree(kctx->reg_dump); +} + +#else /* CONFIG_DEBUG_FS */ + +int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev) +{ + kbdev->job_fault_debug = false; + + return 0; +} + +void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev) +{ +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h new file mode 100644 index 00000000000000..a2bf8983c37c8e --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h @@ -0,0 +1,96 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_DEBUG_JOB_FAULT_H +#define _KBASE_DEBUG_JOB_FAULT_H + +#include +#include + +#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF + +/** + * kbase_debug_job_fault_dev_init - Create the fault event wait queue + * per device and initialize the required lists. + * @kbdev: Device pointer + * + * Return: Zero on success or a negative error code. 
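+ * (The debugfs entry itself is created separately by
+ * kbase_debug_job_fault_debugfs_init().)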
+ */
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_debugfs_init - Initialize the job fault debugfs entry
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_dev_term - Clean up resources created in
+ * kbase_debug_job_fault_dev_init.
+ * @kbdev: Device pointer
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_context_init - Initialize the relevant
+ * data structure per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_context_term - Release the relevant
+ * resource per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_process - Process the failed job.
+ * It will send an event and wake up the job fault wait queue, then
+ * queue work that waits for the job dump to finish.
+ * This function should be called in the interrupt handler and before
+ * jd_done, which makes sure the jd_done_worker is delayed until the
+ * job dump has finished.
+ * @katom: The failed atom pointer
+ * @completion_code: the job status
+ * @return true if a dump is in progress
+ */
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+		u32 completion_code);
+
+
+/**
+ * kbase_debug_job_fault_reg_snapshot_init - Set the interested registers
+ * address during the job fault process; the relevant registers will
+ * be saved when a job fault happens
+ * @kctx: KBase context pointer
+ * @reg_range: Maximum register address space
+ * @return true if initialization succeeded
+ */
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+		int reg_range);
+
+/**
+ * kbase_job_fault_get_reg_snapshot - Read the interested registers for
+ * a failed job dump
+ * @kctx: KBase context pointer
+ * @return true if the registers were read successfully
+ */
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx);
+
+#endif /*_KBASE_DEBUG_JOB_FAULT_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
new file mode 100644
index 00000000000000..c65b4dfd7c6ed9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
@@ -0,0 +1,306 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ * + */ + + + +/* + * Debugfs interface to dump the memory visible to the GPU + */ + +#include "mali_kbase_debug_mem_view.h" +#include "mali_kbase.h" + +#include +#include + +#ifdef CONFIG_DEBUG_FS + +struct debug_mem_mapping { + struct list_head node; + + struct kbase_mem_phy_alloc *alloc; + unsigned long flags; + + u64 start_pfn; + size_t nr_pages; +}; + +struct debug_mem_data { + struct list_head mapping_list; + struct kbase_context *kctx; +}; + +struct debug_mem_seq_off { + struct list_head *lh; + size_t offset; +}; + +static void *debug_mem_start(struct seq_file *m, loff_t *_pos) +{ + struct debug_mem_data *mem_data = m->private; + struct debug_mem_seq_off *data; + struct debug_mem_mapping *map; + loff_t pos = *_pos; + + list_for_each_entry(map, &mem_data->mapping_list, node) { + if (pos >= map->nr_pages) { + pos -= map->nr_pages; + } else { + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + data->lh = &map->node; + data->offset = pos; + return data; + } + } + + /* Beyond the end */ + return NULL; +} + +static void debug_mem_stop(struct seq_file *m, void *v) +{ + kfree(v); +} + +static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct debug_mem_data *mem_data = m->private; + struct debug_mem_seq_off *data = v; + struct debug_mem_mapping *map; + + map = list_entry(data->lh, struct debug_mem_mapping, node); + + if (data->offset < map->nr_pages - 1) { + data->offset++; + ++*pos; + return data; + } + + if (list_is_last(data->lh, &mem_data->mapping_list)) { + kfree(data); + return NULL; + } + + data->lh = data->lh->next; + data->offset = 0; + ++*pos; + + return data; +} + +static int debug_mem_show(struct seq_file *m, void *v) +{ + struct debug_mem_data *mem_data = m->private; + struct debug_mem_seq_off *data = v; + struct debug_mem_mapping *map; + int i, j; + struct page *page; + uint32_t *mapping; + pgprot_t prot = PAGE_KERNEL; + + map = list_entry(data->lh, struct debug_mem_mapping, node); + + kbase_gpu_vm_lock(mem_data->kctx); + + if (data->offset >= map->alloc->nents) { + seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn + + data->offset) << PAGE_SHIFT); + goto out; + } + + if (!(map->flags & KBASE_REG_CPU_CACHED)) + prot = pgprot_writecombine(prot); + + page = phys_to_page(as_phys_addr_t(map->alloc->pages[data->offset])); + mapping = vmap(&page, 1, VM_MAP, prot); + if (!mapping) + goto out; + + for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) { + seq_printf(m, "%016llx:", i + ((map->start_pfn + + data->offset) << PAGE_SHIFT)); + + for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping)) + seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]); + seq_putc(m, '\n'); + } + + vunmap(mapping); + + seq_putc(m, '\n'); + +out: + kbase_gpu_vm_unlock(mem_data->kctx); + return 0; +} + +static const struct seq_operations ops = { + .start = debug_mem_start, + .next = debug_mem_next, + .stop = debug_mem_stop, + .show = debug_mem_show, +}; + +static int debug_mem_zone_open(struct rb_root *rbtree, + struct debug_mem_data *mem_data) +{ + int ret = 0; + struct rb_node *p; + struct kbase_va_region *reg; + struct debug_mem_mapping *mapping; + + for (p = rb_first(rbtree); p; p = rb_next(p)) { + reg = rb_entry(p, struct kbase_va_region, rblink); + + if (reg->gpu_alloc == NULL) + /* Empty region - ignore */ + continue; + + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) { + ret = -ENOMEM; + goto out; + } + + mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + mapping->start_pfn = reg->start_pfn; + mapping->nr_pages = 
reg->nr_pages; + mapping->flags = reg->flags; + list_add_tail(&mapping->node, &mem_data->mapping_list); + } + +out: + return ret; +} + +static int debug_mem_open(struct inode *i, struct file *file) +{ + struct file *kctx_file = i->i_private; + struct kbase_context *kctx = kctx_file->private_data; + struct debug_mem_data *mem_data; + int ret; + + ret = seq_open(file, &ops); + if (ret) + return ret; + + mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL); + if (!mem_data) { + ret = -ENOMEM; + goto out; + } + + mem_data->kctx = kctx; + + INIT_LIST_HEAD(&mem_data->mapping_list); + + get_file(kctx_file); + + kbase_gpu_vm_lock(kctx); + + ret = debug_mem_zone_open(&kctx->reg_rbtree_same, mem_data); + if (0 != ret) { + kbase_gpu_vm_unlock(kctx); + goto out; + } + + ret = debug_mem_zone_open(&kctx->reg_rbtree_exec, mem_data); + if (0 != ret) { + kbase_gpu_vm_unlock(kctx); + goto out; + } + + ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data); + if (0 != ret) { + kbase_gpu_vm_unlock(kctx); + goto out; + } + + kbase_gpu_vm_unlock(kctx); + + ((struct seq_file *)file->private_data)->private = mem_data; + + return 0; + +out: + if (mem_data) { + while (!list_empty(&mem_data->mapping_list)) { + struct debug_mem_mapping *mapping; + + mapping = list_first_entry(&mem_data->mapping_list, + struct debug_mem_mapping, node); + kbase_mem_phy_alloc_put(mapping->alloc); + list_del(&mapping->node); + kfree(mapping); + } + fput(kctx_file); + kfree(mem_data); + } + seq_release(i, file); + return ret; +} + +static int debug_mem_release(struct inode *inode, struct file *file) +{ + struct file *kctx_file = inode->i_private; + struct seq_file *sfile = file->private_data; + struct debug_mem_data *mem_data = sfile->private; + struct debug_mem_mapping *mapping; + + seq_release(inode, file); + + while (!list_empty(&mem_data->mapping_list)) { + mapping = list_first_entry(&mem_data->mapping_list, + struct debug_mem_mapping, node); + kbase_mem_phy_alloc_put(mapping->alloc); + list_del(&mapping->node); + kfree(mapping); + } + + kfree(mem_data); + + fput(kctx_file); + + return 0; +} + +static const struct file_operations kbase_debug_mem_view_fops = { + .open = debug_mem_open, + .release = debug_mem_release, + .read = seq_read, + .llseek = seq_lseek +}; + +/** + * kbase_debug_mem_view_init - Initialise the mem_view sysfs file + * @kctx_file: The /dev/mali0 file instance for the context + * + * This function creates a "mem_view" file which can be used to get a view of + * the context's memory as the GPU sees it (i.e. using the GPU's page tables). + * + * The file is cleaned up by a call to debugfs_remove_recursive() deleting the + * parent directory. + */ +void kbase_debug_mem_view_init(struct file *kctx_file) +{ + struct kbase_context *kctx = kctx_file->private_data; + + debugfs_create_file("mem_view", S_IRUSR, kctx->kctx_dentry, kctx_file, + &kbase_debug_mem_view_fops); +} + +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h new file mode 100644 index 00000000000000..20ab51a776c6c9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h @@ -0,0 +1,25 @@ +/* + * + * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_DEBUG_MEM_VIEW_H +#define _KBASE_DEBUG_MEM_VIEW_H + +#include + +void kbase_debug_mem_view_init(struct file *kctx_file); + +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h new file mode 100644 index 00000000000000..335d361e112c3f --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h @@ -0,0 +1,1629 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_defs.h + * + * Defintions (types, defines, etcs) common to Kbase. They are placed here to + * allow the hierarchy of header files to work. + */ + +#ifndef _KBASE_DEFS_H_ +#define _KBASE_DEFS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_MALI_FPGA_BUS_LOGGER +#include +#endif + + +#if defined(CONFIG_SYNC) +#include +#else +#include "mali_kbase_fence_defs.h" +#endif + +#ifdef CONFIG_DEBUG_FS +#include +#endif /* CONFIG_DEBUG_FS */ + +#ifdef CONFIG_MALI_DEVFREQ +#include +#endif /* CONFIG_MALI_DEVFREQ */ + +#include +#include + +#if defined(CONFIG_PM_RUNTIME) || \ + (defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +#define KBASE_PM_RUNTIME 1 +#endif + +/** Enable SW tracing when set */ +#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE +#define KBASE_TRACE_ENABLE 1 +#endif + +#ifndef KBASE_TRACE_ENABLE +#ifdef CONFIG_MALI_DEBUG +#define KBASE_TRACE_ENABLE 1 +#else +#define KBASE_TRACE_ENABLE 0 +#endif /* CONFIG_MALI_DEBUG */ +#endif /* KBASE_TRACE_ENABLE */ + +/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */ +#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1 + +/** + * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware. + * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU + * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware + * before resetting. + */ +#define ZAP_TIMEOUT 1000 + +/** Number of milliseconds before we time out on a GPU soft/hard reset */ +#define RESET_TIMEOUT 500 + +/** + * Prevent soft-stops from occuring in scheduling situations + * + * This is not due to HW issues, but when scheduling is desired to be more predictable. + * + * Therefore, soft stop may still be disabled due to HW issues. + * + * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context. + * + * @note if not in use, define this value to 0 instead of \#undef'ing it + */ +#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0 + +/** + * Prevent hard-stops from occuring in scheduling situations + * + * This is not due to HW issues, but when scheduling is desired to be more predictable. 
+ * + * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context. + * + * @note if not in use, define this value to 0 instead of \#undef'ing it + */ +#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0 + +/** + * The maximum number of Job Slots to support in the Hardware. + * + * You can optimize this down if your target devices will only ever support a + * small number of job slots. + */ +#define BASE_JM_MAX_NR_SLOTS 3 + +/** + * The maximum number of Address Spaces to support in the Hardware. + * + * You can optimize this down if your target devices will only ever support a + * small number of Address Spaces + */ +#define BASE_MAX_NR_AS 16 + +/* mmu */ +#define MIDGARD_MMU_VA_BITS 48 + +#define MIDGARD_MMU_LEVEL(x) (x) + +#if MIDGARD_MMU_VA_BITS > 39 +#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(0) +#else +#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(1) +#endif + +#define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3) + +#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR) + +/** setting in kbase_context::as_nr that indicates it's invalid */ +#define KBASEP_AS_NR_INVALID (-1) + +#define KBASE_LOCK_REGION_MAX_SIZE (63) +#define KBASE_LOCK_REGION_MIN_SIZE (11) + +#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */ +#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2) +#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1) + +#include "mali_kbase_js_defs.h" +#include "mali_kbase_hwaccess_defs.h" + +#define KBASEP_FORCE_REPLAY_DISABLED 0 + +/* Maximum force replay limit when randomization is enabled */ +#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16 + +/** Atom has been previously soft-stoppped */ +#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1) +/** Atom has been previously retried to execute */ +#define KBASE_KATOM_FLAGS_RERUN (1<<2) +#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3) +/** Atom has been previously hard-stopped. */ +#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4) +/** Atom has caused us to enter disjoint state */ +#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5) +/* Atom blocked on cross-slot dependency */ +#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7) +/* Atom has fail dependency on cross-slot dependency */ +#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8) +/* Atom is currently in the list of atoms blocked on cross-slot dependencies */ +#define KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST (1<<9) +/* Atom is currently holding a context reference */ +#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10) +/* Atom requires GPU to be in protected mode */ +#define KBASE_KATOM_FLAG_PROTECTED (1<<11) +/* Atom has been stored in runnable_tree */ +#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12) + +/* SW related flags about types of JS_COMMAND action + * NOTE: These must be masked off by JS_COMMAND_MASK */ + +/** This command causes a disjoint event */ +#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100 + +/** Bitmask of all SW related flags */ +#define JS_COMMAND_SW_BITS (JS_COMMAND_SW_CAUSES_DISJOINT) + +#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK) +#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks +#endif + +/** Soft-stop command that causes a Disjoint event. 
This of course isn't + * entirely masked off by JS_COMMAND_MASK */ +#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \ + (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP) + +#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT + +/* Serialize atoms within a slot (ie only one atom per job slot) */ +#define KBASE_SERIALIZE_INTRA_SLOT (1 << 0) +/* Serialize atoms between slots (ie only one job slot running at any time) */ +#define KBASE_SERIALIZE_INTER_SLOT (1 << 1) +/* Reset the GPU after each atom completion */ +#define KBASE_SERIALIZE_RESET (1 << 2) + +/* Forward declarations */ +struct kbase_context; +struct kbase_device; +struct kbase_as; +struct kbase_mmu_setup; + +#ifdef CONFIG_DEBUG_FS +struct base_job_fault_event { + + u32 event_code; + struct kbase_jd_atom *katom; + struct work_struct job_fault_work; + struct list_head head; + int reg_offset; +}; + +#endif + +struct kbase_jd_atom_dependency { + struct kbase_jd_atom *atom; + u8 dep_type; +}; + +/** + * struct kbase_io_access - holds information about 1 register access + * + * @addr: first bit indicates r/w (r=0, w=1) + * @value: value written or read + */ +struct kbase_io_access { + uintptr_t addr; + u32 value; +}; + +/** + * struct kbase_io_history - keeps track of all recent register accesses + * + * @enabled: true if register accesses are recorded, false otherwise + * @lock: spinlock protecting kbase_io_access array + * @count: number of registers read/written + * @size: number of elements in kbase_io_access array + * @buf: array of kbase_io_access + */ +struct kbase_io_history { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + bool enabled; +#else + u32 enabled; +#endif + + spinlock_t lock; + size_t count; + u16 size; + struct kbase_io_access *buf; +}; + +/** + * @brief The function retrieves a read-only reference to the atom field from + * the kbase_jd_atom_dependency structure + * + * @param[in] dep kbase jd atom dependency. + * + * @return readonly reference to dependent ATOM. + */ +static inline const struct kbase_jd_atom * kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep) +{ + LOCAL_ASSERT(dep != NULL); + + return (const struct kbase_jd_atom *)(dep->atom); +} + +/** + * @brief The function retrieves a read-only reference to the dependency type field from + * the kbase_jd_atom_dependency structure + * + * @param[in] dep kbase jd atom dependency. + * + * @return A dependency type value. + */ +static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep) +{ + LOCAL_ASSERT(dep != NULL); + + return dep->dep_type; +} + +/** + * @brief Setter macro for dep_atom array entry in kbase_jd_atom + * + * @param[in] dep The kbase jd atom dependency. + * @param[in] a The ATOM to be set as a dependency. + * @param type The ATOM dependency type to be set. + * + */ +static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep, + struct kbase_jd_atom *a, u8 type) +{ + struct kbase_jd_atom_dependency *dep; + + LOCAL_ASSERT(const_dep != NULL); + + dep = (struct kbase_jd_atom_dependency *)const_dep; + + dep->atom = a; + dep->dep_type = type; +} + +/** + * @brief Setter macro for dep_atom array entry in kbase_jd_atom + * + * @param[in] dep The kbase jd atom dependency to be cleared. 
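+ * (The dependency's atom pointer is reset to NULL and its type to
+ * BASE_JD_DEP_TYPE_INVALID.)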
+ * + */ +static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep) +{ + struct kbase_jd_atom_dependency *dep; + + LOCAL_ASSERT(const_dep != NULL); + + dep = (struct kbase_jd_atom_dependency *)const_dep; + + dep->atom = NULL; + dep->dep_type = BASE_JD_DEP_TYPE_INVALID; +} + +enum kbase_atom_gpu_rb_state { + /* Atom is not currently present in slot ringbuffer */ + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB, + /* Atom is in slot ringbuffer but is blocked on a previous atom */ + KBASE_ATOM_GPU_RB_WAITING_BLOCKED, + /* Atom is in slot ringbuffer but is waiting for a previous protected + * mode transition to complete */ + KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV, + /* Atom is in slot ringbuffer but is waiting for proected mode + * transition */ + KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION, + /* Atom is in slot ringbuffer but is waiting for cores to become + * available */ + KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE, + /* Atom is in slot ringbuffer but is blocked on affinity */ + KBASE_ATOM_GPU_RB_WAITING_AFFINITY, + /* Atom is in slot ringbuffer and ready to run */ + KBASE_ATOM_GPU_RB_READY, + /* Atom is in slot ringbuffer and has been submitted to the GPU */ + KBASE_ATOM_GPU_RB_SUBMITTED, + /* Atom must be returned to JS as soon as it reaches the head of the + * ringbuffer due to a previous failure */ + KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1 +}; + +enum kbase_atom_enter_protected_state { + /* + * Starting state: + * Check if a transition into protected mode is required. + * + * NOTE: The integer value of this must + * match KBASE_ATOM_EXIT_PROTECTED_CHECK. + */ + KBASE_ATOM_ENTER_PROTECTED_CHECK = 0, + /* Wait for vinstr to suspend. */ + KBASE_ATOM_ENTER_PROTECTED_VINSTR, + /* Wait for the L2 to become idle in preparation for + * the coherency change. */ + KBASE_ATOM_ENTER_PROTECTED_IDLE_L2, + /* End state; + * Prepare coherency change. */ + KBASE_ATOM_ENTER_PROTECTED_FINISHED, +}; + +enum kbase_atom_exit_protected_state { + /* + * Starting state: + * Check if a transition out of protected mode is required. + * + * NOTE: The integer value of this must + * match KBASE_ATOM_ENTER_PROTECTED_CHECK. + */ + KBASE_ATOM_EXIT_PROTECTED_CHECK = 0, + /* Wait for the L2 to become idle in preparation + * for the reset. */ + KBASE_ATOM_EXIT_PROTECTED_IDLE_L2, + /* Issue the protected reset. */ + KBASE_ATOM_EXIT_PROTECTED_RESET, + /* End state; + * Wait for the reset to complete. */ + KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT, +}; + +struct kbase_ext_res { + u64 gpu_address; + struct kbase_mem_phy_alloc *alloc; +}; + +struct kbase_jd_atom { + struct work_struct work; + ktime_t start_timestamp; + + struct base_jd_udata udata; + struct kbase_context *kctx; + + struct list_head dep_head[2]; + struct list_head dep_item[2]; + const struct kbase_jd_atom_dependency dep[2]; + /* List head used during job dispatch job_done processing - as + * dependencies may not be entirely resolved at this point, we need to + * use a separate list head. */ + struct list_head jd_item; + /* true if atom's jd_item is currently on a list. Prevents atom being + * processed twice. 
*/ + bool in_jd_list; + + u16 nr_extres; + struct kbase_ext_res *extres; + + u32 device_nr; + u64 affinity; + u64 jc; + enum kbase_atom_coreref_state coreref_state; +#if defined(CONFIG_SYNC) + /* Stores either an input or output fence, depending on soft-job type */ + struct sync_fence *fence; + struct sync_fence_waiter sync_waiter; +#endif /* CONFIG_SYNC */ +#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE) + struct { + /* Use the functions/API defined in mali_kbase_fence.h to + * when working with this sub struct */ +#if defined(CONFIG_SYNC_FILE) + /* Input fence */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence_in; +#else + struct dma_fence *fence_in; +#endif +#endif + /* This points to the dma-buf output fence for this atom. If + * this is NULL then there is no fence for this atom and the + * following fields related to dma_fence may have invalid data. + * + * The context and seqno fields contain the details for this + * fence. + * + * This fence is signaled when the katom is completed, + * regardless of the event_code of the katom (signal also on + * failure). + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + /* The dma-buf fence context number for this atom. A unique + * context number is allocated to each katom in the context on + * context creation. + */ + unsigned int context; + /* The dma-buf fence sequence number for this atom. This is + * increased every time this katom uses dma-buf fence. + */ + atomic_t seqno; + /* This contains a list of all callbacks set up to wait on + * other fences. This atom must be held back from JS until all + * these callbacks have been called and dep_count have reached + * 0. The initial value of dep_count must be equal to the + * number of callbacks on this list. + * + * This list is protected by jctx.lock. Callbacks are added to + * this list when the atom is built and the wait are set up. + * All the callbacks then stay on the list until all callbacks + * have been called and the atom is queued, or cancelled, and + * then all callbacks are taken off the list and freed. + */ + struct list_head callbacks; + /* Atomic counter of number of outstandind dma-buf fence + * dependencies for this atom. When dep_count reaches 0 the + * atom may be queued. + * + * The special value "-1" may only be set after the count + * reaches 0, while holding jctx.lock. This indicates that the + * atom has been handled, either queued in JS or cancelled. + * + * If anyone but the dma-fence worker sets this to -1 they must + * ensure that any potentially queued worker must have + * completed before allowing the atom to be marked as unused. + * This can be done by flushing the fence work queue: + * kctx->dma_fence.wq. + */ + atomic_t dep_count; + } dma_fence; +#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE*/ + + /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */ + enum base_jd_event_code event_code; + base_jd_core_req core_req; /**< core requirements */ + /** Job Slot to retry submitting to if submission from IRQ handler failed + * + * NOTE: see if this can be unified into the another member e.g. the event */ + int retry_submit_on_slot; + + u32 ticks; + /* JS atom priority with respect to other atoms on its kctx. 
*/ + int sched_priority; + + int poking; /* BASE_HW_ISSUE_8316 */ + + wait_queue_head_t completed; + enum kbase_jd_atom_state status; +#ifdef CONFIG_GPU_TRACEPOINTS + int work_id; +#endif + /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */ + int slot_nr; + + u32 atom_flags; + + /* Number of times this atom has been retried. Used by replay soft job. + */ + int retry_count; + + enum kbase_atom_gpu_rb_state gpu_rb_state; + + u64 need_cache_flush_cores_retained; + + atomic_t blocked; + + /* Pointer to atom that this atom has same-slot dependency on */ + struct kbase_jd_atom *pre_dep; + /* Pointer to atom that has same-slot dependency on this atom */ + struct kbase_jd_atom *post_dep; + + /* Pointer to atom that this atom has cross-slot dependency on */ + struct kbase_jd_atom *x_pre_dep; + /* Pointer to atom that has cross-slot dependency on this atom */ + struct kbase_jd_atom *x_post_dep; + + /* The GPU's flush count recorded at the time of submission, used for + * the cache flush optimisation */ + u32 flush_id; + + struct kbase_jd_atom_backend backend; +#ifdef CONFIG_DEBUG_FS + struct base_job_fault_event fault_event; +#endif + + /* List head used for three different purposes: + * 1. Overflow list for JS ring buffers. If an atom is ready to run, + * but there is no room in the JS ring buffer, then the atom is put + * on the ring buffer's overflow list using this list node. + * 2. List of waiting soft jobs. + */ + struct list_head queue; + + /* Used to keep track of all JIT free/alloc jobs in submission order + */ + struct list_head jit_node; + bool jit_blocked; + + /* If non-zero, this indicates that the atom will fail with the set + * event_code when the atom is processed. */ + enum base_jd_event_code will_fail_event_code; + + /* Atoms will only ever be transitioning into, or out of + * protected mode so we do not need two separate fields. + */ + union { + enum kbase_atom_enter_protected_state enter; + enum kbase_atom_exit_protected_state exit; + } protected_state; + + struct rb_node runnable_tree_node; + + /* 'Age' of atom relative to other atoms in the context. */ + u32 age; +}; + +static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom) +{ + return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED); +} + +/* + * Theory of operations: + * + * Atom objects are statically allocated within the context structure. + * + * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set. + */ + +#define KBASE_JD_DEP_QUEUE_SIZE 256 + +struct kbase_jd_context { + struct mutex lock; + struct kbasep_js_kctx_info sched_info; + struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT]; + + /** Tracks all job-dispatch jobs. This includes those not tracked by + * the scheduler: 'not ready to run' and 'dependency-only' jobs. */ + u32 job_nr; + + /** Waitq that reflects whether there are no jobs (including SW-only + * dependency jobs). This is set when no jobs are present on the ctx, + * and clear when there are jobs. + * + * @note: Job Dispatcher knows about more jobs than the Job Scheduler: + * the Job Scheduler is unaware of jobs that are blocked on dependencies, + * and SW-only dependency jobs. + * + * This waitq can be waited upon to find out when the context jobs are all + * done/cancelled (including those that might've been blocked on + * dependencies) - and so, whether it can be terminated. 
However, it should + * only be terminated once it is not present in the run-pool (see + * kbasep_js_kctx_info::ctx::is_scheduled). + * + * Since the waitq is only set under kbase_jd_context::lock, + * the waiter should also briefly obtain and drop kbase_jd_context::lock to + * guarentee that the setter has completed its work on the kbase_context + * + * This must be updated atomically with: + * - kbase_jd_context::job_nr */ + wait_queue_head_t zero_jobs_wait; + + /** Job Done workqueue. */ + struct workqueue_struct *job_done_wq; + + spinlock_t tb_lock; + u32 *tb; + size_t tb_wrap_offset; + +#ifdef CONFIG_GPU_TRACEPOINTS + atomic_t work_id; +#endif +}; + +struct kbase_device_info { + u32 features; +}; + +/** Poking state for BASE_HW_ISSUE_8316 */ +enum { + KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0, + KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1 +}; + +/** Poking state for BASE_HW_ISSUE_8316 */ +typedef u32 kbase_as_poke_state; + +struct kbase_mmu_setup { + u64 transtab; + u64 memattr; + u64 transcfg; +}; + +/** + * Important: Our code makes assumptions that a struct kbase_as structure is always at + * kbase_device->as[number]. This is used to recover the containing + * struct kbase_device from a struct kbase_as structure. + * + * Therefore, struct kbase_as structures must not be allocated anywhere else. + */ +struct kbase_as { + int number; + + struct workqueue_struct *pf_wq; + struct work_struct work_pagefault; + struct work_struct work_busfault; + enum kbase_mmu_fault_type fault_type; + bool protected_mode; + u32 fault_status; + u64 fault_addr; + u64 fault_extra_addr; + + struct kbase_mmu_setup current_setup; + + /* BASE_HW_ISSUE_8316 */ + struct workqueue_struct *poke_wq; + struct work_struct poke_work; + /** Protected by hwaccess_lock */ + int poke_refcount; + /** Protected by hwaccess_lock */ + kbase_as_poke_state poke_state; + struct hrtimer poke_timer; +}; + +static inline int kbase_as_has_bus_fault(struct kbase_as *as) +{ + return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS; +} + +static inline int kbase_as_has_page_fault(struct kbase_as *as) +{ + return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE; +} + +struct kbasep_mem_device { + atomic_t used_pages; /* Tracks usage of OS shared memory. Updated + when OS memory is allocated/freed. */ + +}; + +#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X + +enum kbase_trace_code { + /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE + * THIS MUST BE USED AT THE START OF THE ENUM */ +#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X) +#include "mali_kbase_trace_defs.h" +#undef KBASE_TRACE_CODE_MAKE_CODE + /* Comma on its own, to extend the list */ + , + /* Must be the last in the enum */ + KBASE_TRACE_CODE_COUNT +}; + +#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0) +#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1) + +struct kbase_trace { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 timestamp; +#else + struct timespec timestamp; +#endif + u32 thread_id; + u32 cpu; + void *ctx; + bool katom; + int atom_number; + u64 atom_udata[2]; + u64 gpu_addr; + unsigned long info_val; + u8 code; + u8 jobslot; + u8 refcount; + u8 flags; +}; + +/** Event IDs for the power management framework. + * + * Any of these events might be missed, so they should not be relied upon to + * find the precise state of the GPU at a particular time in the + * trace. 
Overall, we should get a high percentage of these events for + * statisical purposes, and so a few missing should not be a problem */ +enum kbase_timeline_pm_event { + /* helper for tests */ + KBASEP_TIMELINE_PM_EVENT_FIRST, + + /** Event reserved for backwards compatibility with 'init' events */ + KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST, + + /** The power state of the device has changed. + * + * Specifically, the device has reached a desired or available state. + */ + KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED, + + /** The GPU is becoming active. + * + * This event is sent when the first context is about to use the GPU. + */ + KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE, + + /** The GPU is becoming idle. + * + * This event is sent when the last context has finished using the GPU. + */ + KBASE_TIMELINE_PM_EVENT_GPU_IDLE, + + /** Event reserved for backwards compatibility with 'policy_change' + * events */ + KBASE_TIMELINE_PM_EVENT_RESERVED_4, + + /** Event reserved for backwards compatibility with 'system_suspend' + * events */ + KBASE_TIMELINE_PM_EVENT_RESERVED_5, + + /** Event reserved for backwards compatibility with 'system_resume' + * events */ + KBASE_TIMELINE_PM_EVENT_RESERVED_6, + + /** The job scheduler is requesting to power up/down cores. + * + * This event is sent when: + * - powered down cores are needed to complete a job + * - powered up cores are not needed anymore + */ + KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE, + + KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE, +}; + +#ifdef CONFIG_MALI_TRACE_TIMELINE +struct kbase_trace_kctx_timeline { + atomic_t jd_atoms_in_flight; + u32 owner_tgid; +}; + +struct kbase_trace_kbdev_timeline { + /* Note: strictly speaking, not needed, because it's in sync with + * kbase_device::jm_slots[]::submitted_nr + * + * But it's kept as an example of how to add global timeline tracking + * information + * + * The caller must hold hwaccess_lock when accessing this */ + u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS]; + + /* Last UID for each PM event */ + atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1]; + /* Counter for generating PM event UIDs */ + atomic_t pm_event_uid_counter; + /* + * L2 transition state - true indicates that the transition is ongoing + * Expected to be protected by hwaccess_lock */ + bool l2_transitioning; +}; +#endif /* CONFIG_MALI_TRACE_TIMELINE */ + + +struct kbasep_kctx_list_element { + struct list_head link; + struct kbase_context *kctx; +}; + +/** + * Data stored per device for power management. + * + * This structure contains data for the power management framework. There is one + * instance of this structure per device in the system. + */ +struct kbase_pm_device_data { + /** + * The lock protecting Power Management structures accessed outside of + * IRQ. + * + * This lock must also be held whenever the GPU is being powered on or + * off. + */ + struct mutex lock; + + /** The reference count of active contexts on this device. */ + int active_count; + /** Flag indicating suspending/suspended */ + bool suspending; + /* Wait queue set when active_count == 0 */ + wait_queue_head_t zero_active_count_wait; + + /** + * Bit masks identifying the available shader cores that are specified + * via sysfs. One mask per job slot. + */ + u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS]; + u64 debug_core_mask_all; + + /** + * Callback for initializing the runtime power management. 
+ * + * @param kbdev The kbase device + * + * @return 0 on success, else error code + */ + int (*callback_power_runtime_init)(struct kbase_device *kbdev); + + /** + * Callback for terminating the runtime power management. + * + * @param kbdev The kbase device + */ + void (*callback_power_runtime_term)(struct kbase_device *kbdev); + + /* Time in milliseconds between each dvfs sample */ + u32 dvfs_period; + + /* Period of GPU poweroff timer */ + ktime_t gpu_poweroff_time; + + /* Number of ticks of GPU poweroff timer before shader is powered off */ + int poweroff_shader_ticks; + + /* Number of ticks of GPU poweroff timer before GPU is powered off */ + int poweroff_gpu_ticks; + + struct kbase_pm_backend_data backend; +}; + +/** + * struct kbase_mem_pool - Page based memory pool for kctx/kbdev + * @kbdev: Kbase device where memory is used + * @cur_size: Number of free pages currently in the pool (may exceed @max_size + * in some corner cases) + * @max_size: Maximum number of free pages in the pool + * @order: order = 0 refers to a pool of 4 KB pages + * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB) + * @pool_lock: Lock protecting the pool - must be held when modifying @cur_size + * and @page_list + * @page_list: List of free pages in the pool + * @reclaim: Shrinker for kernel reclaim of free pages + * @next_pool: Pointer to next pool where pages can be allocated when this pool + * is empty. Pages will spill over to the next pool when this pool + * is full. Can be NULL if there is no next pool. + */ +struct kbase_mem_pool { + struct kbase_device *kbdev; + size_t cur_size; + size_t max_size; + size_t order; + spinlock_t pool_lock; + struct list_head page_list; + struct shrinker reclaim; + + struct kbase_mem_pool *next_pool; +}; + +/** + * struct kbase_devfreq_opp - Lookup table for converting between nominal OPP + * frequency, and real frequency and core mask + * @opp_freq: Nominal OPP frequency + * @real_freq: Real GPU frequency + * @core_mask: Shader core mask + */ +struct kbase_devfreq_opp { + u64 opp_freq; + u64 real_freq; + u64 core_mask; +}; + +struct kbase_mmu_mode { + void (*update)(struct kbase_context *kctx); + void (*get_as_setup)(struct kbase_context *kctx, + struct kbase_mmu_setup * const setup); + void (*disable_as)(struct kbase_device *kbdev, int as_nr); + phys_addr_t (*pte_to_phy_addr)(u64 entry); + int (*ate_is_valid)(u64 ate, unsigned int level); + int (*pte_is_valid)(u64 pte, unsigned int level); + void (*entry_set_ate)(u64 *entry, struct tagged_addr phy, + unsigned long flags, unsigned int level); + void (*entry_set_pte)(u64 *entry, phys_addr_t phy); + void (*entry_invalidate)(u64 *entry); +}; + +struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void); +struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void); + + +#define DEVNAME_SIZE 16 + +struct kbase_device { + s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS]; + + u32 hw_quirks_sc; + u32 hw_quirks_tiler; + u32 hw_quirks_mmu; + u32 hw_quirks_jm; + + struct list_head entry; + struct device *dev; + struct miscdevice mdev; + u64 reg_start; + size_t reg_size; + void __iomem *reg; + + struct { + int irq; + int flags; + } irqs[3]; + + struct clk *clock; +#ifdef CONFIG_REGULATOR + struct regulator *regulator; +#endif + char devname[DEVNAME_SIZE]; + +#ifdef CONFIG_MALI_NO_MALI + void *model; + struct kmem_cache *irq_slab; + struct workqueue_struct *irq_workq; + atomic_t serving_job_irq; + atomic_t serving_gpu_irq; + atomic_t serving_mmu_irq; + spinlock_t reg_op_lock; +#endif /* CONFIG_MALI_NO_MALI */ + + struct 
kbase_pm_device_data pm; + struct kbasep_js_device_data js_data; + struct kbase_mem_pool mem_pool; + struct kbase_mem_pool lp_mem_pool; + struct kbasep_mem_device memdev; + struct kbase_mmu_mode const *mmu_mode; + + struct kbase_as as[BASE_MAX_NR_AS]; + /* The below variables (as_free and as_to_kctx) are managed by the + * Context Scheduler. The kbasep_js_device_data::runpool_irq::lock must + * be held whilst accessing these. + */ + u16 as_free; /* Bitpattern of free Address Spaces */ + /* Mapping from active Address Spaces to kbase_context */ + struct kbase_context *as_to_kctx[BASE_MAX_NR_AS]; + + + spinlock_t mmu_mask_change; + + struct kbase_gpu_props gpu_props; + + /** List of SW workarounds for HW issues */ + unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG]; + /** List of features available */ + unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG]; + + /* Bitmaps of cores that are currently in use (running jobs). + * These should be kept up to date by the job scheduler. + * + * pm.power_change_lock should be held when accessing these members. + * + * kbase_pm_check_transitions_nolock() should be called when bits are + * cleared to update the power management system and allow transitions to + * occur. */ + u64 shader_inuse_bitmap; + + /* Refcount for cores in use */ + u32 shader_inuse_cnt[64]; + + /* Bitmaps of cores the JS needs for jobs ready to run */ + u64 shader_needed_bitmap; + + /* Refcount for cores needed */ + u32 shader_needed_cnt[64]; + + u32 tiler_inuse_cnt; + + u32 tiler_needed_cnt; + + /* struct for keeping track of the disjoint information + * + * The state is > 0 if the GPU is in a disjoint state. Otherwise 0 + * The count is the number of disjoint events that have occurred on the GPU + */ + struct { + atomic_t count; + atomic_t state; + } disjoint_event; + + /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */ + u32 l2_users_count; + + /* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be + * submitted to these cores. These are updated by the power management code. The job scheduler should avoid + * submitting new jobs to any cores that are not marked as available. + * + * pm.power_change_lock should be held when accessing these members. 
+ */ + u64 shader_available_bitmap; + u64 tiler_available_bitmap; + u64 l2_available_bitmap; + u64 stack_available_bitmap; + + u64 shader_ready_bitmap; + u64 shader_transitioning_bitmap; + + s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */ + s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */ + + /* Structure used for instrumentation and HW counters dumping */ + struct kbase_hwcnt { + /* The lock should be used when accessing any of the following members */ + spinlock_t lock; + + struct kbase_context *kctx; + u64 addr; + + struct kbase_instr_backend backend; + } hwcnt; + + struct kbase_vinstr_context *vinstr_ctx; + +#if KBASE_TRACE_ENABLE + spinlock_t trace_lock; + u16 trace_first_out; + u16 trace_next_in; + struct kbase_trace *trace_rbuf; +#endif + + u32 reset_timeout_ms; + + struct mutex cacheclean_lock; + + /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */ + void *platform_context; + + /* List of kbase_contexts created */ + struct list_head kctx_list; + struct mutex kctx_list_lock; + +#ifdef CONFIG_MALI_DEVFREQ + struct devfreq_dev_profile devfreq_profile; + struct devfreq *devfreq; + unsigned long current_freq; + unsigned long current_nominal_freq; + unsigned long current_voltage; + u64 current_core_mask; + struct kbase_devfreq_opp *opp_table; + int num_opps; +#ifdef CONFIG_DEVFREQ_THERMAL +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct devfreq_cooling_device *devfreq_cooling; +#else + struct thermal_cooling_device *devfreq_cooling; +#endif + /* Current IPA model - true for configured model, false for fallback */ + atomic_t ipa_use_configured_model; + struct { + /* Access to this struct must be with ipa.lock held */ + struct mutex lock; + struct kbase_ipa_model *configured_model; + struct kbase_ipa_model *fallback_model; + } ipa; +#endif /* CONFIG_DEVFREQ_THERMAL */ +#endif /* CONFIG_MALI_DEVFREQ */ + + +#ifdef CONFIG_MALI_TRACE_TIMELINE + struct kbase_trace_kbdev_timeline timeline; +#endif + + /* + * Control for enabling job dump on failure, set when control debugfs + * is opened. + */ + bool job_fault_debug; + +#ifdef CONFIG_DEBUG_FS + /* directory for debugfs entries */ + struct dentry *mali_debugfs_directory; + /* Root directory for per context entry */ + struct dentry *debugfs_ctx_directory; + +#ifdef CONFIG_MALI_DEBUG + /* bit for each as, set if there is new data to report */ + u64 debugfs_as_read_bitmap; +#endif /* CONFIG_MALI_DEBUG */ + + /* failed job dump, used for separate debug process */ + wait_queue_head_t job_fault_wq; + wait_queue_head_t job_fault_resume_wq; + struct workqueue_struct *job_fault_resume_workq; + struct list_head job_fault_event_list; + spinlock_t job_fault_event_lock; + struct kbase_context *kctx_fault; + +#if !MALI_CUSTOMER_RELEASE + /* Per-device data for register dumping interface */ + struct { + u16 reg_offset; /* Offset of a GPU_CONTROL register to be + dumped upon request */ + } regs_dump_debugfs_data; +#endif /* !MALI_CUSTOMER_RELEASE */ +#endif /* CONFIG_DEBUG_FS */ + + /* fbdump profiling controls set by gator */ + u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX]; + + +#if MALI_CUSTOMER_RELEASE == 0 + /* Number of jobs that are run before a job is forced to fail and + * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced + * failures. */ + int force_replay_limit; + /* Count of jobs between forced failures. Incremented on each job. 
A + * job is forced to fail once this is greater than or equal to + * force_replay_limit. */ + int force_replay_count; + /* Core requirement for jobs to be failed and replayed. May be zero. */ + base_jd_core_req force_replay_core_req; + /* true if force_replay_limit should be randomized. The random + * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT. + */ + bool force_replay_random; +#endif + + /* Total number of created contexts */ + atomic_t ctx_num; + +#ifdef CONFIG_DEBUG_FS + /* Holds the most recent register accesses */ + struct kbase_io_history io_history; +#endif /* CONFIG_DEBUG_FS */ + + struct kbase_hwaccess_data hwaccess; + + /* Count of page/bus faults waiting for workqueues to process */ + atomic_t faults_pending; + + /* true if GPU is powered off or power off operation is in progress */ + bool poweroff_pending; + + + /* defaults for new context created for this device */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + bool infinite_cache_active_default; +#else + u32 infinite_cache_active_default; +#endif + size_t mem_pool_max_size_default; + + /* current gpu coherency mode */ + u32 current_gpu_coherency_mode; + /* system coherency mode */ + u32 system_coherency; + /* Flag to track when cci snoops have been enabled on the interface */ + bool cci_snoop_enabled; + + /* SMC function IDs to call into Trusted firmware to enable/disable + * cache snooping. Value of 0 indicates that they are not used + */ + u32 snoop_enable_smc; + u32 snoop_disable_smc; + + /* Protected mode operations */ + struct protected_mode_ops *protected_ops; + + /* Protected device attached to this kbase device */ + struct protected_mode_device *protected_dev; + + /* + * true when GPU is put into protected mode + */ + bool protected_mode; + + /* + * true when GPU is transitioning into or out of protected mode + */ + bool protected_mode_transition; + + /* + * true if protected mode is supported + */ + bool protected_mode_support; + + +#ifdef CONFIG_MALI_DEBUG + wait_queue_head_t driver_inactive_wait; + bool driver_inactive; +#endif /* CONFIG_MALI_DEBUG */ + +#ifdef CONFIG_MALI_FPGA_BUS_LOGGER + /* + * Bus logger integration. + */ + struct bus_logger_client *buslogger; +#endif + /* Boolean indicating if an IRQ flush during reset is in progress. */ + bool irq_reset_flush; + + /* list of inited sub systems. Used during terminate/error recovery */ + u32 inited_subsys; + + spinlock_t hwaccess_lock; + + /* Protects access to MMU operations */ + struct mutex mmu_hw_mutex; + + /* Current serialization mode. See KBASE_SERIALIZE_* for details */ + u8 serialize_jobs; +}; + +/** + * struct jsctx_queue - JS context atom queue + * @runnable_tree: Root of RB-tree containing currently runnable atoms on this + * job slot. + * @x_dep_head: Head item of the linked list of atoms blocked on cross-slot + * dependencies. Atoms on this list will be moved to the + * runnable_tree when the blocking atom completes. + * + * hwaccess_lock must be held when accessing this structure. + */ +struct jsctx_queue { + struct rb_root runnable_tree; + struct list_head x_dep_head; +}; + + +#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \ + (((minor) & 0xFFF) << 8) | \ + ((0 & 0xFF) << 0)) + +/** + * enum kbase_context_flags - Flags for kbase contexts + * + * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit + * process on a 64-bit kernel. + * + * @KCTX_RUNNABLE_REF: Set when context is counted in + * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing. 
+ * + * @KCTX_ACTIVE: Set when the context is active. + * + * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this + * context. + * + * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been + * initialized. + * + * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new + * allocations. Existing allocations will not change. + * + * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs. + * + * @KCTX_PRIVILEGED:Set if the context uses an address space and should be kept + * scheduled in. + * + * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool. + * This is only ever updated whilst the jsctx_mutex is held. + * + * @KCTX_DYING: Set when the context process is in the process of being evicted. + * + * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this + * context, to disable use of implicit dma-buf fences. This is used to avoid + * potential synchronization deadlocks. + * + * All members need to be separate bits. This enum is intended for use in a + * bitmask where multiple values get OR-ed together. + */ +enum kbase_context_flags { + KCTX_COMPAT = 1U << 0, + KCTX_RUNNABLE_REF = 1U << 1, + KCTX_ACTIVE = 1U << 2, + KCTX_PULLED = 1U << 3, + KCTX_MEM_PROFILE_INITIALIZED = 1U << 4, + KCTX_INFINITE_CACHE = 1U << 5, + KCTX_SUBMIT_DISABLED = 1U << 6, + KCTX_PRIVILEGED = 1U << 7, + KCTX_SCHEDULED = 1U << 8, + KCTX_DYING = 1U << 9, + KCTX_NO_IMPLICIT_SYNC = 1U << 10, +}; + +struct kbase_sub_alloc { + struct list_head link; + struct page *page; + DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K); +}; + +struct kbase_context { + struct file *filp; + struct kbase_device *kbdev; + u32 id; /* System wide unique id */ + unsigned long api_version; + phys_addr_t pgd; + struct list_head event_list; + struct list_head event_coalesce_list; + struct mutex event_mutex; + atomic_t event_closed; + struct workqueue_struct *event_workq; + atomic_t event_count; + int event_coalesce_count; + + atomic_t flags; + + atomic_t setup_complete; + atomic_t setup_in_progress; + + u64 *mmu_teardown_pages; + + struct tagged_addr aliasing_sink_page; + + struct mutex mem_partials_lock; + struct list_head mem_partials; + + struct mutex mmu_lock; + struct mutex reg_lock; /* To be converted to a rwlock? */ + struct rb_root reg_rbtree_same; /* RB tree of GPU (live) regions, + * SAME_VA zone */ + struct rb_root reg_rbtree_exec; /* RB tree of GPU (live) regions, + * EXEC zone */ + struct rb_root reg_rbtree_custom; /* RB tree of GPU (live) regions, + * CUSTOM_VA zone */ + + unsigned long cookies; + struct kbase_va_region *pending_regions[BITS_PER_LONG]; + + wait_queue_head_t event_queue; + pid_t tgid; + pid_t pid; + + struct kbase_jd_context jctx; + atomic_t used_pages; + atomic_t nonmapped_pages; + + struct kbase_mem_pool mem_pool; + struct kbase_mem_pool lp_mem_pool; + + struct shrinker reclaim; + struct list_head evict_list; + + struct list_head waiting_soft_jobs; + spinlock_t waiting_soft_jobs_lock; +#ifdef CONFIG_MALI_DMA_FENCE + struct { + struct list_head waiting_resource; + struct workqueue_struct *wq; + } dma_fence; +#endif /* CONFIG_MALI_DMA_FENCE */ + /** This is effectively part of the Run Pool, because it only has a valid + * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in + * + * The hwaccess_lock must be held whilst accessing this. 
+ * + * If the context relating to this as_nr is required, you must use + * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear + * whilst you're using it. Alternatively, just hold the hwaccess_lock + * to ensure the context doesn't disappear (but this has restrictions on what other locks + * you can take whilst doing this) */ + int as_nr; + + /* Keeps track of the number of users of this context. A user can be a + * job that is available for execution, instrumentation needing to 'pin' + * a context for counter collection, etc. If the refcount reaches 0 then + * this context is considered inactive and the previously programmed + * AS might be cleared at any point. + */ + atomic_t refcount; + + /* NOTE: + * + * Flags are in jctx.sched_info.ctx.flags + * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex + * + * All other flags must be added there */ + spinlock_t mm_update_lock; + struct mm_struct *process_mm; + /* End of the SAME_VA zone */ + u64 same_va_end; + +#ifdef CONFIG_MALI_TRACE_TIMELINE + struct kbase_trace_kctx_timeline timeline; +#endif +#ifdef CONFIG_DEBUG_FS + /* Content of mem_profile file */ + char *mem_profile_data; + /* Size of @c mem_profile_data */ + size_t mem_profile_size; + /* Mutex guarding memory profile state */ + struct mutex mem_profile_lock; + /* Memory profile directory under debugfs */ + struct dentry *kctx_dentry; + + /* for job fault debug */ + unsigned int *reg_dump; + atomic_t job_fault_count; + /* This list will keep the following atoms during the dump + * in the same context + */ + struct list_head job_fault_resume_event_list; + +#endif /* CONFIG_DEBUG_FS */ + + struct jsctx_queue jsctx_queue + [KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS]; + + /* Number of atoms currently pulled from this context */ + atomic_t atoms_pulled; + /* Number of atoms currently pulled from this context, per slot */ + atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS]; + /* Number of atoms currently pulled from this context, per slot and + * priority. Hold hwaccess_lock when accessing */ + int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][ + KBASE_JS_ATOM_SCHED_PRIO_COUNT]; + + /* true if slot is blocked on the given priority. 
This will be set on a + * soft-stop */ + bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT]; + + /* Bitmask of slots that can be pulled from */ + u32 slots_pullable; + + /* Backend specific data */ + struct kbase_context_backend backend; + + /* Work structure used for deferred ASID assignment */ + struct work_struct work; + + /* Only one userspace vinstr client per kbase context */ + struct kbase_vinstr_client *vinstr_cli; + struct mutex vinstr_cli_lock; + + /* List of completed jobs waiting for events to be posted */ + struct list_head completed_jobs; + /* Number of work items currently pending on job_done_wq */ + atomic_t work_count; + + /* Waiting soft-jobs will fail when this timer expires */ + struct timer_list soft_job_timeout; + + /* JIT allocation management */ + struct kbase_va_region *jit_alloc[256]; + struct list_head jit_active_head; + struct list_head jit_pool_head; + struct list_head jit_destroy_head; + struct mutex jit_evict_lock; + struct work_struct jit_work; + + /* A list of the JIT soft-jobs in submission order + * (protected by kbase_jd_context.lock) + */ + struct list_head jit_atoms_head; + /* A list of pending JIT alloc soft-jobs (using the 'queue' list_head) + * (protected by kbase_jd_context.lock) + */ + struct list_head jit_pending_alloc; + + /* External sticky resource management */ + struct list_head ext_res_meta_head; + + /* Used to record that a drain was requested from atomic context */ + atomic_t drain_pending; + + /* Current age count, used to determine age for newly submitted atoms */ + u32 age_count; +}; + +/** + * struct kbase_ctx_ext_res_meta - Structure which binds an external resource + * to a @kbase_context. + * @ext_res_node: List head for adding the metadata to a + * @kbase_context. + * @alloc: The physical memory allocation structure + * which is mapped. + * @gpu_addr: The GPU virtual address the resource is + * mapped to. + * + * External resources can be mapped into multiple contexts as well as the same + * context multiple times. + * As kbase_va_region itself isn't refcounted we can't attach our extra + * information to it as it could be removed under our feet leaving external + * resources pinned. + * This metadata structure binds a single external resource to a single + * context, ensuring that per context mapping is tracked separately so it can + * be overridden when needed and abuses by the application (freeing the resource + * multiple times) don't effect the refcount of the physical allocation. + */ +struct kbase_ctx_ext_res_meta { + struct list_head ext_res_node; + struct kbase_mem_phy_alloc *alloc; + u64 gpu_addr; +}; + +enum kbase_reg_access_type { + REG_READ, + REG_WRITE +}; + +enum kbase_share_attr_bits { + /* (1ULL << 8) bit is reserved */ + SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */ + SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */ +}; + +/** + * kbase_device_is_cpu_coherent - Returns if the device is CPU coherent. + * @kbdev: kbase device + * + * Return: true if the device access are coherent, false if not. 
+ */ +static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev) +{ + if ((kbdev->system_coherency == COHERENCY_ACE_LITE) || + (kbdev->system_coherency == COHERENCY_ACE)) + return true; + + return false; +} + +/* Conversion helpers for setting up high resolution timers */ +#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U)) +#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x)) + +/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */ +#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000 +/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */ +#define KBASE_AS_INACTIVE_MAX_LOOPS 100000 + +/* Maximum number of times a job can be replayed */ +#define BASEP_JD_REPLAY_LIMIT 15 + +/* JobDescriptorHeader - taken from the architecture specifications, the layout + * is currently identical for all GPU archs. */ +struct job_descriptor_header { + u32 exception_status; + u32 first_incomplete_task; + u64 fault_pointer; + u8 job_descriptor_size : 1; + u8 job_type : 7; + u8 job_barrier : 1; + u8 _reserved_01 : 1; + u8 _reserved_1 : 1; + u8 _reserved_02 : 1; + u8 _reserved_03 : 1; + u8 _reserved_2 : 1; + u8 _reserved_04 : 1; + u8 _reserved_05 : 1; + u16 job_index; + u16 job_dependency_index_1; + u16 job_dependency_index_2; + union { + u64 _64; + u32 _32; + } next_job; +}; + +#endif /* _KBASE_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_device.c b/drivers/gpu/arm/midgard/mali_kbase_device.c new file mode 100644 index 00000000000000..b22b6a95964b3a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_device.c @@ -0,0 +1,677 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base kernel device APIs + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +/* NOTE: Magic - 0x45435254 (TRCE in ASCII). + * Supports tracing feature provided in the base module. + * Please keep it in sync with the value of base module. 
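The "TRCE in ASCII" remark refers to the little-endian byte layout of the constant: stored in memory, 0x45435254 is the byte sequence 0x54 0x52 0x43 0x45, i.e. 'T' 'R' 'C' 'E'. A small standalone check (not part of this patch) illustrates it:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t magic = 0x45435254;    /* value of TRACE_BUFFER_HEADER_SPECIAL */
            char text[5] = { 0 };

            /* On a little-endian host the low byte (0x54 == 'T') comes first. */
            memcpy(text, &magic, sizeof(magic));
            printf("%s\n", text);           /* prints "TRCE" */
            return 0;
    }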
+ */ +#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254 + +#if KBASE_TRACE_ENABLE +static const char *kbasep_trace_code_string[] = { + /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE + * THIS MUST BE USED AT THE START OF THE ARRAY */ +#define KBASE_TRACE_CODE_MAKE_CODE(X) # X +#include "mali_kbase_trace_defs.h" +#undef KBASE_TRACE_CODE_MAKE_CODE +}; +#endif + +#define DEBUG_MESSAGE_SIZE 256 + +static int kbasep_trace_init(struct kbase_device *kbdev); +static void kbasep_trace_term(struct kbase_device *kbdev); +static void kbasep_trace_hook_wrapper(void *param); + +struct kbase_device *kbase_device_alloc(void) +{ + return kzalloc(sizeof(struct kbase_device), GFP_KERNEL); +} + +static int kbase_device_as_init(struct kbase_device *kbdev, int i) +{ + const char format[] = "mali_mmu%d"; + char name[sizeof(format)]; + const char poke_format[] = "mali_mmu%d_poker"; + char poke_name[sizeof(poke_format)]; + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) + snprintf(poke_name, sizeof(poke_name), poke_format, i); + + snprintf(name, sizeof(name), format, i); + + kbdev->as[i].number = i; + kbdev->as[i].fault_addr = 0ULL; + + kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1); + if (!kbdev->as[i].pf_wq) + return -EINVAL; + + INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker); + INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) { + struct hrtimer *poke_timer = &kbdev->as[i].poke_timer; + struct work_struct *poke_work = &kbdev->as[i].poke_work; + + kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1); + if (!kbdev->as[i].poke_wq) { + destroy_workqueue(kbdev->as[i].pf_wq); + return -EINVAL; + } + INIT_WORK(poke_work, kbasep_as_do_poke); + + hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + poke_timer->function = kbasep_as_poke_timer_callback; + + kbdev->as[i].poke_refcount = 0; + kbdev->as[i].poke_state = 0u; + } + + return 0; +} + +static void kbase_device_as_term(struct kbase_device *kbdev, int i) +{ + destroy_workqueue(kbdev->as[i].pf_wq); + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) + destroy_workqueue(kbdev->as[i].poke_wq); +} + +static int kbase_device_all_as_init(struct kbase_device *kbdev) +{ + int i, err; + + for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { + err = kbase_device_as_init(kbdev, i); + if (err) + goto free_workqs; + } + + return 0; + +free_workqs: + for (; i > 0; i--) + kbase_device_as_term(kbdev, i); + + return err; +} + +static void kbase_device_all_as_term(struct kbase_device *kbdev) +{ + int i; + + for (i = 0; i < kbdev->nr_hw_address_spaces; i++) + kbase_device_as_term(kbdev, i); +} + +int kbase_device_init(struct kbase_device * const kbdev) +{ + int i, err; +#ifdef CONFIG_ARM64 + struct device_node *np = NULL; +#endif /* CONFIG_ARM64 */ + + spin_lock_init(&kbdev->mmu_mask_change); + mutex_init(&kbdev->mmu_hw_mutex); +#ifdef CONFIG_ARM64 + kbdev->cci_snoop_enabled = false; + np = kbdev->dev->of_node; + if (np != NULL) { + if (of_property_read_u32(np, "snoop_enable_smc", + &kbdev->snoop_enable_smc)) + kbdev->snoop_enable_smc = 0; + if (of_property_read_u32(np, "snoop_disable_smc", + &kbdev->snoop_disable_smc)) + kbdev->snoop_disable_smc = 0; + /* Either both or none of the calls should be provided. 
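The check that follows rejects device trees providing only one of the two SMC IDs: snoop_enable_smc and snoop_disable_smc must be either both zero or both non-zero. A standalone restatement of that predicate (function name illustrative, not part of the driver) is:

    #include <assert.h>
    #include <stdint.h>

    /* Returns 1 for the invalid "only one SMC ID provided" case that
     * kbase_device_init() rejects with -EINVAL.
     */
    static int smc_ids_invalid(uint32_t enable_smc, uint32_t disable_smc)
    {
            return (enable_smc == 0) != (disable_smc == 0);
    }

    int main(void)
    {
            assert(!smc_ids_invalid(0, 0));          /* neither provided: OK  */
            assert(!smc_ids_invalid(0x100, 0x101));  /* both provided:    OK  */
            assert(smc_ids_invalid(0x100, 0));       /* only enable:      bad */
            assert(smc_ids_invalid(0, 0x101));       /* only disable:     bad */
            return 0;
    }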
*/ + if (!((kbdev->snoop_disable_smc == 0 + && kbdev->snoop_enable_smc == 0) + || (kbdev->snoop_disable_smc != 0 + && kbdev->snoop_enable_smc != 0))) { + WARN_ON(1); + err = -EINVAL; + goto fail; + } + } +#endif /* CONFIG_ARM64 */ + /* Get the list of workarounds for issues on the current HW + * (identified by the GPU_ID register) + */ + err = kbase_hw_set_issues_mask(kbdev); + if (err) + goto fail; + + /* Set the list of features available on the current HW + * (identified by the GPU_ID register) + */ + kbase_hw_set_features_mask(kbdev); + + kbase_gpuprops_set_features(kbdev); + + /* On Linux 4.0+, dma coherency is determined from device tree */ +#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops); +#endif + + /* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our + * device structure was created by device-tree + */ + if (!kbdev->dev->dma_mask) + kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask; + + err = dma_set_mask(kbdev->dev, + DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)); + if (err) + goto dma_set_mask_failed; + + err = dma_set_coherent_mask(kbdev->dev, + DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)); + if (err) + goto dma_set_mask_failed; + + kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces; + + err = kbase_device_all_as_init(kbdev); + if (err) + goto as_init_failed; + + spin_lock_init(&kbdev->hwcnt.lock); + + err = kbasep_trace_init(kbdev); + if (err) + goto term_as; + + mutex_init(&kbdev->cacheclean_lock); + +#ifdef CONFIG_MALI_TRACE_TIMELINE + for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i) + kbdev->timeline.slot_atoms_submitted[i] = 0; + + for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i) + atomic_set(&kbdev->timeline.pm_event_uid[i], 0); +#endif /* CONFIG_MALI_TRACE_TIMELINE */ + + /* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */ + for (i = 0; i < FBDUMP_CONTROL_MAX; i++) + kbdev->kbase_profiling_controls[i] = 0; + + kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev); + + atomic_set(&kbdev->ctx_num, 0); + + err = kbase_instr_backend_init(kbdev); + if (err) + goto term_trace; + + kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD; + + kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS; + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + kbdev->mmu_mode = kbase_mmu_mode_get_aarch64(); + else + kbdev->mmu_mode = kbase_mmu_mode_get_lpae(); + +#ifdef CONFIG_MALI_DEBUG + init_waitqueue_head(&kbdev->driver_inactive_wait); +#endif /* CONFIG_MALI_DEBUG */ + + return 0; +term_trace: + kbasep_trace_term(kbdev); +term_as: + kbase_device_all_as_term(kbdev); +as_init_failed: +dma_set_mask_failed: +fail: + return err; +} + +void kbase_device_term(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev); + +#if KBASE_TRACE_ENABLE + kbase_debug_assert_register_hook(NULL, NULL); +#endif + + kbase_instr_backend_term(kbdev); + + kbasep_trace_term(kbdev); + + kbase_device_all_as_term(kbdev); +} + +void kbase_device_free(struct kbase_device *kbdev) +{ + kfree(kbdev); +} + +int kbase_device_trace_buffer_install( + struct kbase_context *kctx, u32 *tb, size_t size) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(tb); + + /* Interface uses 16-bit value to track last accessed entry. Each entry + * is composed of two 32-bit words. + * This limits the size that can be handled without an overflow. 
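The size check that follows comes directly from this layout: at most 0xFFFF addressable entries of two 32-bit words each, so the interface caps the installable buffer at 65535 * 8 = 524280 bytes. As a standalone illustration of the arithmetic (not part of the patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* One trace entry is two 32-bit words; the write offset is 16 bits. */
            const size_t entry_bytes = 2 * sizeof(uint32_t);   /* 8 bytes */
            const size_t max_bytes   = 0xFFFF * entry_bytes;   /* 524280  */

            printf("largest installable trace buffer: %zu bytes\n", max_bytes);
            return 0;
    }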
*/ + if (0xFFFF * (2 * sizeof(u32)) < size) + return -EINVAL; + + /* set up the header */ + /* magic number in the first 4 bytes */ + tb[0] = TRACE_BUFFER_HEADER_SPECIAL; + /* Store (write offset = 0, wrap counter = 0, transaction active = no) + * write offset 0 means never written. + * Offsets 1 to (wrap_offset - 1) used to store values when trace started + */ + tb[1] = 0; + + /* install trace buffer */ + spin_lock_irqsave(&kctx->jctx.tb_lock, flags); + kctx->jctx.tb_wrap_offset = size / 8; + kctx->jctx.tb = tb; + spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); + + return 0; +} + +void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(kctx); + spin_lock_irqsave(&kctx->jctx.tb_lock, flags); + kctx->jctx.tb = NULL; + kctx->jctx.tb_wrap_offset = 0; + spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); +} + +void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value) +{ + unsigned long flags; + + spin_lock_irqsave(&kctx->jctx.tb_lock, flags); + if (kctx->jctx.tb) { + u16 wrap_count; + u16 write_offset; + u32 *tb = kctx->jctx.tb; + u32 header_word; + + header_word = tb[1]; + KBASE_DEBUG_ASSERT(0 == (header_word & 0x1)); + + wrap_count = (header_word >> 1) & 0x7FFF; + write_offset = (header_word >> 16) & 0xFFFF; + + /* mark as transaction in progress */ + tb[1] |= 0x1; + mb(); + + /* calculate new offset */ + write_offset++; + if (write_offset == kctx->jctx.tb_wrap_offset) { + /* wrap */ + write_offset = 1; + wrap_count++; + wrap_count &= 0x7FFF; /* 15bit wrap counter */ + } + + /* store the trace entry at the selected offset */ + tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0); + tb[write_offset * 2 + 1] = reg_value; + mb(); + + /* new header word */ + header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */ + tb[1] = header_word; + } + spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags); +} + +/* + * Device trace functions + */ +#if KBASE_TRACE_ENABLE + +static int kbasep_trace_init(struct kbase_device *kbdev) +{ + struct kbase_trace *rbuf; + + rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL); + + if (!rbuf) + return -EINVAL; + + kbdev->trace_rbuf = rbuf; + spin_lock_init(&kbdev->trace_lock); + return 0; +} + +static void kbasep_trace_term(struct kbase_device *kbdev) +{ + kfree(kbdev->trace_rbuf); +} + +static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len) +{ + s32 written = 0; + + /* Initial part of message */ + written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0); + + if (trace_msg->katom) + written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0); + + written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0); + + /* NOTE: Could add function callbacks to handle different message types */ + /* Jobslot present */ + if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT) + written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0); + + written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0); + + /* Refcount present */ + if 
(trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT) + written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0); + + written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0); + + /* Rest of message */ + written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0); +} + +static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg) +{ + char buffer[DEBUG_MESSAGE_SIZE]; + + kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE); + dev_dbg(kbdev->dev, "%s", buffer); +} + +void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val) +{ + unsigned long irqflags; + struct kbase_trace *trace_msg; + + spin_lock_irqsave(&kbdev->trace_lock, irqflags); + + trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in]; + + /* Fill the message */ + trace_msg->thread_id = task_pid_nr(current); + trace_msg->cpu = task_cpu(current); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + ktime_get_real_ts64(&trace_msg->timestamp); +#else + getnstimeofday(&trace_msg->timestamp); +#endif + + trace_msg->code = code; + trace_msg->ctx = ctx; + + if (NULL == katom) { + trace_msg->katom = false; + } else { + trace_msg->katom = true; + trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom); + trace_msg->atom_udata[0] = katom->udata.blob[0]; + trace_msg->atom_udata[1] = katom->udata.blob[1]; + } + + trace_msg->gpu_addr = gpu_addr; + trace_msg->jobslot = jobslot; + trace_msg->refcount = MIN((unsigned int)refcount, 0xFF); + trace_msg->info_val = info_val; + trace_msg->flags = flags; + + /* Update the ringbuffer indices */ + kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK; + if (kbdev->trace_next_in == kbdev->trace_first_out) + kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK; + + /* Done */ + + spin_unlock_irqrestore(&kbdev->trace_lock, irqflags); +} + +void kbasep_trace_clear(struct kbase_device *kbdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->trace_lock, flags); + kbdev->trace_first_out = kbdev->trace_next_in; + spin_unlock_irqrestore(&kbdev->trace_lock, flags); +} + +void kbasep_trace_dump(struct kbase_device *kbdev) +{ + unsigned long flags; + u32 start; + u32 end; + + dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val"); + spin_lock_irqsave(&kbdev->trace_lock, flags); + start = kbdev->trace_first_out; + end = kbdev->trace_next_in; + + while (start != end) { + struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start]; + + kbasep_trace_dump_msg(kbdev, trace_msg); + + start = (start + 1) & KBASE_TRACE_MASK; + } + dev_dbg(kbdev->dev, "TRACE_END"); + + spin_unlock_irqrestore(&kbdev->trace_lock, flags); + + KBASE_TRACE_CLEAR(kbdev); +} + +static void kbasep_trace_hook_wrapper(void *param) +{ + struct kbase_device *kbdev = (struct kbase_device *)param; + + kbasep_trace_dump(kbdev); +} + +#ifdef CONFIG_DEBUG_FS +struct trace_seq_state { + struct kbase_trace trace_buf[KBASE_TRACE_SIZE]; + u32 start; + u32 end; +}; + +static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos) +{ + struct trace_seq_state *state = s->private; + int i; + + if (*pos > KBASE_TRACE_SIZE) + return NULL; + i = state->start + *pos; + if ((state->end >= state->start && i >= state->end) || + i >= state->end + KBASE_TRACE_SIZE) + return NULL; + + i &= KBASE_TRACE_MASK; + + 
return &state->trace_buf[i]; +} + +static void kbasep_trace_seq_stop(struct seq_file *s, void *data) +{ +} + +static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos) +{ + struct trace_seq_state *state = s->private; + int i; + + (*pos)++; + + i = (state->start + *pos) & KBASE_TRACE_MASK; + if (i == state->end) + return NULL; + + return &state->trace_buf[i]; +} + +static int kbasep_trace_seq_show(struct seq_file *s, void *data) +{ + struct kbase_trace *trace_msg = data; + char buffer[DEBUG_MESSAGE_SIZE]; + + kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE); + seq_printf(s, "%s\n", buffer); + return 0; +} + +static const struct seq_operations kbasep_trace_seq_ops = { + .start = kbasep_trace_seq_start, + .next = kbasep_trace_seq_next, + .stop = kbasep_trace_seq_stop, + .show = kbasep_trace_seq_show, +}; + +static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file) +{ + struct kbase_device *kbdev = inode->i_private; + unsigned long flags; + + struct trace_seq_state *state; + + state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state)); + if (!state) + return -ENOMEM; + + spin_lock_irqsave(&kbdev->trace_lock, flags); + state->start = kbdev->trace_first_out; + state->end = kbdev->trace_next_in; + memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf)); + spin_unlock_irqrestore(&kbdev->trace_lock, flags); + + return 0; +} + +static const struct file_operations kbasep_trace_debugfs_fops = { + .open = kbasep_trace_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +void kbasep_trace_debugfs_init(struct kbase_device *kbdev) +{ + debugfs_create_file("mali_trace", S_IRUGO, + kbdev->mali_debugfs_directory, kbdev, + &kbasep_trace_debugfs_fops); +} + +#else +void kbasep_trace_debugfs_init(struct kbase_device *kbdev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#else /* KBASE_TRACE_ENABLE */ +static int kbasep_trace_init(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); + return 0; +} + +static void kbasep_trace_term(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +static void kbasep_trace_hook_wrapper(void *param) +{ + CSTD_UNUSED(param); +} + +void kbasep_trace_dump(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} +#endif /* KBASE_TRACE_ENABLE */ + +void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value) +{ + switch (control) { + case FBDUMP_CONTROL_ENABLE: + /* fall through */ + case FBDUMP_CONTROL_RATE: + /* fall through */ + case SW_COUNTER_ENABLE: + /* fall through */ + case FBDUMP_CONTROL_RESIZE_FACTOR: + kbdev->kbase_profiling_controls[control] = value; + break; + default: + dev_err(kbdev->dev, "Profiling control %d not found\n", control); + break; + } +} + +/* + * Called by gator to control the production of + * profiling information at runtime + * */ + +void _mali_profiling_control(u32 action, u32 value) +{ + struct kbase_device *kbdev = NULL; + + /* find the first i.e. call with -1 */ + kbdev = kbase_find_device(-1); + + if (NULL != kbdev) + kbase_set_profiling_control(kbdev, action, value); +} +KBASE_EXPORT_SYMBOL(_mali_profiling_control); + diff --git a/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c new file mode 100644 index 00000000000000..f70bcccf4050f5 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c @@ -0,0 +1,76 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Base kernel disjoint events helper functions + */ + +#include + +void kbase_disjoint_init(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + atomic_set(&kbdev->disjoint_event.count, 0); + atomic_set(&kbdev->disjoint_event.state, 0); +} + +/* increment the disjoint event count */ +void kbase_disjoint_event(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + atomic_inc(&kbdev->disjoint_event.count); +} + +/* increment the state and the event counter */ +void kbase_disjoint_state_up(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + atomic_inc(&kbdev->disjoint_event.state); + + kbase_disjoint_event(kbdev); +} + +/* decrement the state */ +void kbase_disjoint_state_down(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0); + + kbase_disjoint_event(kbdev); + + atomic_dec(&kbdev->disjoint_event.state); +} + +/* increments the count only if the state is > 0 */ +void kbase_disjoint_event_potential(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + if (atomic_read(&kbdev->disjoint_event.state)) + kbase_disjoint_event(kbdev); +} + +u32 kbase_disjoint_event_get(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + + return atomic_read(&kbdev->disjoint_event.count); +} +KBASE_EXPORT_TEST_API(kbase_disjoint_event_get); diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c new file mode 100644 index 00000000000000..16ace6171d6d0a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c @@ -0,0 +1,449 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as + * it will be set there. 
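The disjoint-event helpers above keep two counters: an ever-increasing event count and a nesting state, with kbase_disjoint_event_potential() only counting while the state is raised. A minimal userspace model of that behaviour, using C11 atomics purely for illustration (these helper names are not part of the driver), is:

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_uint disjoint_count;
    static atomic_uint disjoint_state;

    static void disjoint_event(void)      { atomic_fetch_add(&disjoint_count, 1); }
    static void disjoint_state_up(void)   { atomic_fetch_add(&disjoint_state, 1); disjoint_event(); }
    static void disjoint_state_down(void) { disjoint_event(); atomic_fetch_sub(&disjoint_state, 1); }

    static void disjoint_event_potential(void)
    {
            if (atomic_load(&disjoint_state))   /* only counted while state > 0 */
                    disjoint_event();
    }

    int main(void)
    {
            disjoint_event_potential();          /* state == 0: not counted */
            assert(atomic_load(&disjoint_count) == 0);

            disjoint_state_up();                 /* counted (1) */
            disjoint_event_potential();          /* counted (2) */
            disjoint_state_down();               /* counted (3) */
            disjoint_event_potential();          /* state back to 0: not counted */
            assert(atomic_load(&disjoint_count) == 3);
            return 0;
    }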
+ */ +#include "mali_kbase_dma_fence.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void +kbase_dma_fence_work(struct work_struct *pwork); + +static void +kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + + list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource); +} + +static void +kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom) +{ + list_del(&katom->queue); +} + +static int +kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info, + struct ww_acquire_ctx *ctx) +{ + struct dma_resv *content_res = NULL; + unsigned int content_res_idx = 0; + unsigned int r; + int err = 0; + + ww_acquire_init(ctx, &reservation_ww_class); + +retry: + for (r = 0; r < info->dma_fence_resv_count; r++) { + if (info->resv_objs[r] == content_res) { + content_res = NULL; + continue; + } + + err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx); + if (err) + goto error; + } + + ww_acquire_done(ctx); + return err; + +error: + content_res_idx = r; + + /* Unlock the locked one ones */ + while (r--) + ww_mutex_unlock(&info->resv_objs[r]->lock); + + if (content_res) + ww_mutex_unlock(&content_res->lock); + + /* If we deadlock try with lock_slow and retry */ + if (err == -EDEADLK) { + content_res = info->resv_objs[content_res_idx]; + ww_mutex_lock_slow(&content_res->lock, ctx); + goto retry; + } + + /* If we are here the function failed */ + ww_acquire_fini(ctx); + return err; +} + +static void +kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info, + struct ww_acquire_ctx *ctx) +{ + unsigned int r; + + for (r = 0; r < info->dma_fence_resv_count; r++) + ww_mutex_unlock(&info->resv_objs[r]->lock); + ww_acquire_fini(ctx); +} + +/** + * kbase_dma_fence_queue_work() - Queue work to handle @katom + * @katom: Pointer to atom for which to queue work + * + * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and + * submit the atom. + */ +static void +kbase_dma_fence_queue_work(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + bool ret; + + INIT_WORK(&katom->work, kbase_dma_fence_work); + ret = queue_work(kctx->dma_fence.wq, &katom->work); + /* Warn if work was already queued, that should not happen. */ + WARN_ON(!ret); +} + +/** + * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom + * @katom: Katom to cancel + * + * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held. + */ +static void +kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom) +{ + lockdep_assert_held(&katom->kctx->jctx.lock); + + /* Cancel callbacks and clean up. */ + kbase_fence_free_callbacks(katom); + + /* Mark the atom as handled in case all fences signaled just before + * canceling the callbacks and the worker was queued. + */ + kbase_fence_dep_count_set(katom, -1); + + /* Prevent job_done_nolock from being called twice on an atom when + * there is a race between job completion and cancellation. 
+ */ + + if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) { + /* Wait was cancelled - zap the atom */ + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + if (jd_done_nolock(katom, NULL)) + kbase_js_sched_all(katom->kctx->kbdev); + } +} + +/** + * kbase_dma_fence_work() - Worker thread called when a fence is signaled + * @pwork: work_struct containing a pointer to a katom + * + * This function will clean and mark all dependencies as satisfied + */ +static void +kbase_dma_fence_work(struct work_struct *pwork) +{ + struct kbase_jd_atom *katom; + struct kbase_jd_context *ctx; + + katom = container_of(pwork, struct kbase_jd_atom, work); + ctx = &katom->kctx->jctx; + + mutex_lock(&ctx->lock); + if (kbase_fence_dep_count_read(katom) != 0) + goto out; + + kbase_fence_dep_count_set(katom, -1); + + /* Remove atom from list of dma-fence waiting atoms. */ + kbase_dma_fence_waiters_remove(katom); + /* Cleanup callbacks. */ + kbase_fence_free_callbacks(katom); + /* + * Queue atom on GPU, unless it has already completed due to a failing + * dependency. Run jd_done_nolock() on the katom if it is completed. + */ + if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED)) + jd_done_nolock(katom, NULL); + else + kbase_jd_dep_clear_locked(katom); + +out: + mutex_unlock(&ctx->lock); +} + +static void +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb) +#else +kbase_dma_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +#endif +{ + struct kbase_fence_cb *kcb = container_of(cb, + struct kbase_fence_cb, + fence_cb); + struct kbase_jd_atom *katom = kcb->katom; + + /* If the atom is zapped dep_count will be forced to a negative number + * preventing this callback from ever scheduling work. Which in turn + * would reschedule the atom. + */ + + if (kbase_fence_dep_count_dec_and_test(katom)) + kbase_dma_fence_queue_work(katom); +} + +static int +kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom, + struct dma_resv *resv, + bool exclusive) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *excl_fence = NULL; + struct fence **shared_fences = NULL; +#else + struct dma_fence *excl_fence = NULL; + struct dma_fence **shared_fences = NULL; +#endif + unsigned int shared_count = 0; + int err, i; + + err = dma_resv_get_fences_rcu(resv, + &excl_fence, + &shared_count, + &shared_fences); + if (err) + return err; + + if (excl_fence) { + err = kbase_fence_add_callback(katom, + excl_fence, + kbase_dma_fence_cb); + + /* Release our reference, taken by reservation_object_get_fences_rcu(), + * to the fence. We have set up our callback (if that was possible), + * and it's the fence's owner is responsible for singling the fence + * before allowing it to disappear. + */ + dma_fence_put(excl_fence); + + if (err) + goto out; + } + + if (exclusive) { + for (i = 0; i < shared_count; i++) { + err = kbase_fence_add_callback(katom, + shared_fences[i], + kbase_dma_fence_cb); + if (err) + goto out; + } + } + + /* Release all our references to the shared fences, taken by + * reservation_object_get_fences_rcu(). We have set up our callback (if + * that was possible), and it's the fence's owner is responsible for + * signaling the fence before allowing it to disappear. + */ +out: + for (i = 0; i < shared_count; i++) + dma_fence_put(shared_fences[i]); + kfree(shared_fences); + + if (err) { + /* + * On error, cancel and clean up all callbacks that was set up + * before the error. 
+ */ + kbase_fence_free_callbacks(katom); + } + + return err; +} + +void kbase_dma_fence_add_reservation(struct dma_resv *resv, + struct kbase_dma_fence_resv_info *info, + bool exclusive) +{ + unsigned int i; + + for (i = 0; i < info->dma_fence_resv_count; i++) { + /* Duplicate resource, ignore */ + if (info->resv_objs[i] == resv) + return; + } + + info->resv_objs[info->dma_fence_resv_count] = resv; + if (exclusive) + set_bit(info->dma_fence_resv_count, + info->dma_fence_excl_bitmap); + (info->dma_fence_resv_count)++; +} + +int kbase_dma_fence_wait(struct kbase_jd_atom *katom, + struct kbase_dma_fence_resv_info *info) +{ + int err, i; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + struct ww_acquire_ctx ww_ctx; + + lockdep_assert_held(&katom->kctx->jctx.lock); + + fence = kbase_fence_out_new(katom); + if (!fence) { + err = -ENOMEM; + dev_err(katom->kctx->kbdev->dev, + "Error %d creating fence.\n", err); + return err; + } + + kbase_fence_dep_count_set(katom, 1); + + err = kbase_dma_fence_lock_reservations(info, &ww_ctx); + if (err) { + dev_err(katom->kctx->kbdev->dev, + "Error %d locking reservations.\n", err); + kbase_fence_dep_count_set(katom, -1); + kbase_fence_out_remove(katom); + return err; + } + + for (i = 0; i < info->dma_fence_resv_count; i++) { + struct dma_resv *obj = info->resv_objs[i]; + + if (!test_bit(i, info->dma_fence_excl_bitmap)) { + err = dma_resv_reserve_shared(obj, 1); + if (err) { + dev_err(katom->kctx->kbdev->dev, + "Error %d reserving space for shared fence.\n", err); + goto end; + } + + err = kbase_dma_fence_add_reservation_callback(katom, obj, false); + if (err) { + dev_err(katom->kctx->kbdev->dev, + "Error %d adding reservation to callback.\n", err); + goto end; + } + + dma_resv_add_shared_fence(obj, fence); + } else { + err = kbase_dma_fence_add_reservation_callback(katom, obj, true); + if (err) { + dev_err(katom->kctx->kbdev->dev, + "Error %d adding reservation to callback.\n", err); + goto end; + } + + dma_resv_add_excl_fence(obj, fence); + } + } + +end: + kbase_dma_fence_unlock_reservations(info, &ww_ctx); + + if (likely(!err)) { + /* Test if the callbacks are already triggered */ + if (kbase_fence_dep_count_dec_and_test(katom)) { + kbase_fence_dep_count_set(katom, -1); + kbase_fence_free_callbacks(katom); + } else { + /* Add katom to the list of dma-buf fence waiting atoms + * only if it is still waiting. + */ + kbase_dma_fence_waiters_add(katom); + } + } else { + /* There was an error, cancel callbacks, set dep_count to -1 to + * indicate that the atom has been handled (the caller will + * kill it for us), signal the fence, free callbacks and the + * fence. + */ + kbase_fence_free_callbacks(katom); + kbase_fence_dep_count_set(katom, -1); + kbase_dma_fence_signal(katom); + } + + return err; +} + +void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx) +{ + struct list_head *list = &kctx->dma_fence.waiting_resource; + + while (!list_empty(list)) { + struct kbase_jd_atom *katom; + + katom = list_first_entry(list, struct kbase_jd_atom, queue); + kbase_dma_fence_waiters_remove(katom); + kbase_dma_fence_cancel_atom(katom); + } +} + +void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom) +{ + /* Cancel callbacks and clean up. */ + if (kbase_fence_free_callbacks(katom)) + kbase_dma_fence_queue_work(katom); +} + +void kbase_dma_fence_signal(struct kbase_jd_atom *katom) +{ + if (!katom->dma_fence.fence) + return; + + /* Signal the atom's fence. 
*/ + dma_fence_signal(katom->dma_fence.fence); + + kbase_fence_out_remove(katom); + + kbase_fence_free_callbacks(katom); +} + +void kbase_dma_fence_term(struct kbase_context *kctx) +{ + destroy_workqueue(kctx->dma_fence.wq); + kctx->dma_fence.wq = NULL; +} + +int kbase_dma_fence_init(struct kbase_context *kctx) +{ + INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource); + + kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d", + WQ_UNBOUND, 1, kctx->pid); + if (!kctx->dma_fence.wq) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h new file mode 100644 index 00000000000000..b1c2dc7b21af91 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h @@ -0,0 +1,131 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_DMA_FENCE_H_ +#define _KBASE_DMA_FENCE_H_ + +#ifdef CONFIG_MALI_DMA_FENCE + +#include +#include +#include + + +/* Forward declaration from mali_kbase_defs.h */ +struct kbase_jd_atom; +struct kbase_context; + +/** + * struct kbase_dma_fence_resv_info - Structure with list of reservation objects + * @resv_objs: Array of reservation objects to attach the + * new fence to. + * @dma_fence_resv_count: Number of reservation objects in the array. + * @dma_fence_excl_bitmap: Specifies which resv_obj are exclusive. + * + * This is used by some functions to pass around a collection of data about + * reservation objects. + */ +struct kbase_dma_fence_resv_info { + struct dma_resv **resv_objs; + unsigned int dma_fence_resv_count; + unsigned long *dma_fence_excl_bitmap; +}; + +/** + * kbase_dma_fence_add_reservation() - Adds a resv to the array of resv_objs + * @resv: Reservation object to add to the array. + * @info: Pointer to struct with current reservation info + * @exclusive: Boolean indicating if exclusive access is needed + * + * The function adds a new reservation_object to an existing array of + * reservation_objects. At the same time keeps track of which objects require + * exclusive access in dma_fence_excl_bitmap. + */ +void kbase_dma_fence_add_reservation(struct dma_resv *resv, + struct kbase_dma_fence_resv_info *info, + bool exclusive); + +/** + * kbase_dma_fence_wait() - Creates a new fence and attaches it to the resv_objs + * @katom: Katom with the external dependency. + * @info: Pointer to struct with current reservation info + * + * Return: An error code or 0 if succeeds + */ +int kbase_dma_fence_wait(struct kbase_jd_atom *katom, + struct kbase_dma_fence_resv_info *info); + +/** + * kbase_dma_fence_cancel_ctx() - Cancel all dma-fences blocked atoms on kctx + * @kctx: Pointer to kbase context + * + * This function will cancel and clean up all katoms on @kctx that is waiting + * on dma-buf fences. + * + * Locking: jctx.lock needs to be held when calling this function. 
+ */ +void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx); + +/** + * kbase_dma_fence_cancel_callbacks() - Cancel only callbacks on katom + * @katom: Pointer to katom whose callbacks are to be canceled + * + * This function cancels all dma-buf fence callbacks on @katom, but does not + * cancel the katom itself. + * + * The caller is responsible for ensuring that jd_done_nolock is called on + * @katom. + * + * Locking: jctx.lock must be held when calling this function. + */ +void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom); + +/** + * kbase_dma_fence_signal() - Signal katom's fence and clean up after wait + * @katom: Pointer to katom to signal and clean up + * + * This function will signal the @katom's fence, if it has one, and clean up + * the callback data from the katom's wait on earlier fences. + * + * Locking: jctx.lock must be held while calling this function. + */ +void kbase_dma_fence_signal(struct kbase_jd_atom *katom); + +/** + * kbase_dma_fence_term() - Terminate Mali dma-fence context + * @kctx: kbase context to terminate + */ +void kbase_dma_fence_term(struct kbase_context *kctx); + +/** + * kbase_dma_fence_init() - Initialize Mali dma-fence context + * @kctx: kbase context to initialize + */ +int kbase_dma_fence_init(struct kbase_context *kctx); + + +#else /* CONFIG_MALI_DMA_FENCE */ +/* Dummy functions for when dma-buf fence isn't enabled. */ + +static inline int kbase_dma_fence_init(struct kbase_context *kctx) +{ + return 0; +} + +static inline void kbase_dma_fence_term(struct kbase_context *kctx) {} +#endif /* CONFIG_MALI_DMA_FENCE */ +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_event.c b/drivers/gpu/arm/midgard/mali_kbase_event.c new file mode 100644 index 00000000000000..188148645f374b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_event.c @@ -0,0 +1,259 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +#include +#include +#include + +static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + struct base_jd_udata data; + + lockdep_assert_held(&kctx->jctx.lock); + + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(katom != NULL); + KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED); + + data = katom->udata; + + KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight)); + + KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx); + KBASE_TLSTREAM_TL_DEL_ATOM(katom); + + katom->status = KBASE_JD_ATOM_STATE_UNUSED; + + wake_up(&katom->completed); + + return data; +} + +int kbase_event_pending(struct kbase_context *ctx) +{ + KBASE_DEBUG_ASSERT(ctx); + + return (atomic_read(&ctx->event_count) != 0) || + (atomic_read(&ctx->event_closed) != 0); +} + +KBASE_EXPORT_TEST_API(kbase_event_pending); + +int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent) +{ + struct kbase_jd_atom *atom; + + KBASE_DEBUG_ASSERT(ctx); + + mutex_lock(&ctx->event_mutex); + + if (list_empty(&ctx->event_list)) { + if (!atomic_read(&ctx->event_closed)) { + mutex_unlock(&ctx->event_mutex); + return -1; + } + + /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */ + mutex_unlock(&ctx->event_mutex); + uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED; + memset(&uevent->udata, 0, sizeof(uevent->udata)); + dev_dbg(ctx->kbdev->dev, + "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n", + BASE_JD_EVENT_DRV_TERMINATED); + return 0; + } + + /* normal event processing */ + atomic_dec(&ctx->event_count); + atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]); + list_del(ctx->event_list.next); + + mutex_unlock(&ctx->event_mutex); + + dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom); + uevent->event_code = atom->event_code; + uevent->atom_number = (atom - ctx->jctx.atoms); + + if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) + kbase_jd_free_external_resources(atom); + + mutex_lock(&ctx->jctx.lock); + uevent->udata = kbase_event_process(ctx, atom); + mutex_unlock(&ctx->jctx.lock); + + return 0; +} + +KBASE_EXPORT_TEST_API(kbase_event_dequeue); + +/** + * kbase_event_process_noreport_worker - Worker for processing atoms that do not + * return an event but do have external + * resources + * @data: Work structure + */ +static void kbase_event_process_noreport_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, + work); + struct kbase_context *kctx = katom->kctx; + + if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) + kbase_jd_free_external_resources(katom); + + mutex_lock(&kctx->jctx.lock); + kbase_event_process(kctx, katom); + mutex_unlock(&kctx->jctx.lock); +} + +/** + * kbase_event_process_noreport - Process atoms that do not return an event + * @kctx: Context pointer + * @katom: Atom to be processed + * + * Atoms that do not have external resources will be processed immediately. + * Atoms that do have external resources will be processed on a workqueue, in + * order to avoid locking issues. 
+ */ +static void kbase_event_process_noreport(struct kbase_context *kctx, + struct kbase_jd_atom *katom) +{ + if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { + INIT_WORK(&katom->work, kbase_event_process_noreport_worker); + queue_work(kctx->event_workq, &katom->work); + } else { + kbase_event_process(kctx, katom); + } +} + +/** + * kbase_event_coalesce - Move pending events to the main event list + * @kctx: Context pointer + * + * kctx->event_list and kctx->event_coalesce_count must be protected + * by a lock unless this is the last thread using them + * (and we're about to terminate the lock). + * + * Return: The number of pending events moved to the main event list + */ +static int kbase_event_coalesce(struct kbase_context *kctx) +{ + const int event_count = kctx->event_coalesce_count; + + /* Join the list of pending events onto the tail of the main list + and reset it */ + list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list); + kctx->event_coalesce_count = 0; + + /* Return the number of events moved */ + return event_count; +} + +void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom) +{ + if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) { + if (atom->event_code == BASE_JD_EVENT_DONE) { + /* Don't report the event */ + kbase_event_process_noreport(ctx, atom); + return; + } + } + + if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) { + /* Don't report the event */ + kbase_event_process_noreport(ctx, atom); + return; + } + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_POSTED); + if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) { + /* Don't report the event until other event(s) have completed */ + mutex_lock(&ctx->event_mutex); + list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list); + ++ctx->event_coalesce_count; + mutex_unlock(&ctx->event_mutex); + } else { + /* Report the event and any pending events now */ + int event_count = 1; + + mutex_lock(&ctx->event_mutex); + event_count += kbase_event_coalesce(ctx); + list_add_tail(&atom->dep_item[0], &ctx->event_list); + atomic_add(event_count, &ctx->event_count); + mutex_unlock(&ctx->event_mutex); + + kbase_event_wakeup(ctx); + } +} +KBASE_EXPORT_TEST_API(kbase_event_post); + +void kbase_event_close(struct kbase_context *kctx) +{ + mutex_lock(&kctx->event_mutex); + atomic_set(&kctx->event_closed, true); + mutex_unlock(&kctx->event_mutex); + kbase_event_wakeup(kctx); +} + +int kbase_event_init(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx); + + INIT_LIST_HEAD(&kctx->event_list); + INIT_LIST_HEAD(&kctx->event_coalesce_list); + mutex_init(&kctx->event_mutex); + atomic_set(&kctx->event_count, 0); + kctx->event_coalesce_count = 0; + atomic_set(&kctx->event_closed, false); + kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1); + + if (NULL == kctx->event_workq) + return -EINVAL; + + return 0; +} + +KBASE_EXPORT_TEST_API(kbase_event_init); + +void kbase_event_cleanup(struct kbase_context *kctx) +{ + int event_count; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(kctx->event_workq); + + flush_workqueue(kctx->event_workq); + destroy_workqueue(kctx->event_workq); + + /* We use kbase_event_dequeue to remove the remaining events as that + * deals with all the cleanup needed for the atoms. 
+ * + * Note: use of kctx->event_list without a lock is safe because this must be the last + * thread using it (because we're about to terminate the lock) + */ + event_count = kbase_event_coalesce(kctx); + atomic_add(event_count, &kctx->event_count); + + while (!list_empty(&kctx->event_list)) { + struct base_jd_event_v2 event; + + kbase_event_dequeue(kctx, &event); + } +} + +KBASE_EXPORT_TEST_API(kbase_event_cleanup); diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.c b/drivers/gpu/arm/midgard/mali_kbase_fence.c new file mode 100644 index 00000000000000..17b5621043d6e7 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_fence.c @@ -0,0 +1,201 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include +#include +#include + +/* Spin lock protecting all Mali fences as fence->lock. */ +static DEFINE_SPINLOCK(kbase_fence_lock); + +static const char * +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +kbase_fence_get_driver_name(struct fence *fence) +#else +kbase_fence_get_driver_name(struct dma_fence *fence) +#endif +{ + return kbase_drv_name; +} + +static const char * +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +kbase_fence_get_timeline_name(struct fence *fence) +#else +kbase_fence_get_timeline_name(struct dma_fence *fence) +#endif +{ + return kbase_timeline_name; +} + +static bool +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +kbase_fence_enable_signaling(struct fence *fence) +#else +kbase_fence_enable_signaling(struct dma_fence *fence) +#endif +{ + return true; +} + +static void +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +kbase_fence_fence_value_str(struct fence *fence, char *str, int size) +#else +kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size) +#endif +{ + snprintf(str, size, "%u", fence->seqno); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +const struct fence_ops kbase_fence_ops = { + .wait = fence_default_wait, +#else +const struct dma_fence_ops kbase_fence_ops = { + .wait = dma_fence_default_wait, +#endif + .get_driver_name = kbase_fence_get_driver_name, + .get_timeline_name = kbase_fence_get_timeline_name, + .enable_signaling = kbase_fence_enable_signaling, + .fence_value_str = kbase_fence_fence_value_str +}; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +struct fence * +kbase_fence_out_new(struct kbase_jd_atom *katom) +#else +struct dma_fence * +kbase_fence_out_new(struct kbase_jd_atom *katom) +#endif +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + + WARN_ON(katom->dma_fence.fence); + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return NULL; + + dma_fence_init(fence, + &kbase_fence_ops, + &kbase_fence_lock, + katom->dma_fence.context, + atomic_inc_return(&katom->dma_fence.seqno)); + + katom->dma_fence.fence = fence; + + return fence; +} + +bool +kbase_fence_free_callbacks(struct kbase_jd_atom *katom) +{ + struct kbase_fence_cb *cb, *tmp; + bool res = false; + + 
lockdep_assert_held(&katom->kctx->jctx.lock); + + /* Clean up and free callbacks. */ + list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) { + bool ret; + + /* Cancel callbacks that hasn't been called yet. */ + ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb); + if (ret) { + int ret; + + /* Fence had not signaled, clean up after + * canceling. + */ + ret = atomic_dec_return(&katom->dma_fence.dep_count); + + if (unlikely(ret == 0)) + res = true; + } + + /* + * Release the reference taken in + * kbase_fence_add_callback(). + */ + dma_fence_put(cb->fence); + list_del(&cb->node); + kfree(cb); + } + + return res; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +int +kbase_fence_add_callback(struct kbase_jd_atom *katom, + struct fence *fence, + fence_func_t callback) +#else +int +kbase_fence_add_callback(struct kbase_jd_atom *katom, + struct dma_fence *fence, + dma_fence_func_t callback) +#endif +{ + int err = 0; + struct kbase_fence_cb *kbase_fence_cb; + + if (!fence) + return -EINVAL; + + kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL); + if (!kbase_fence_cb) + return -ENOMEM; + + kbase_fence_cb->fence = fence; + kbase_fence_cb->katom = katom; + INIT_LIST_HEAD(&kbase_fence_cb->node); + + err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb, + callback); + if (err == -ENOENT) { + /* Fence signaled, get the completion result */ + err = dma_fence_get_status(fence); + + /* remap success completion to err code */ + if (err == 1) + err = 0; + + kfree(kbase_fence_cb); + } else if (err) { + kfree(kbase_fence_cb); + } else { + /* + * Get reference to fence that will be kept until callback gets + * cleaned up in kbase_fence_free_callbacks(). + */ + dma_fence_get(fence); + atomic_inc(&katom->dma_fence.dep_count); + /* Add callback to katom's list of callbacks */ + list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks); + } + + return err; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.h b/drivers/gpu/arm/midgard/mali_kbase_fence.h new file mode 100644 index 00000000000000..f3ed025f7f6ca9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_fence.h @@ -0,0 +1,270 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#ifndef _KBASE_FENCE_H_ +#define _KBASE_FENCE_H_ + +/* + * mali_kbase_fence.[hc] has common fence code used by both + * - CONFIG_MALI_DMA_FENCE - implicit DMA fences + * - CONFIG_SYNC_FILE - explicit fences beginning with 4.9 kernel + */ + +#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE) + +#include +#include "mali_kbase_fence_defs.h" +#include "mali_kbase.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +extern const struct fence_ops kbase_fence_ops; +#else +extern const struct dma_fence_ops kbase_fence_ops; +#endif + +/** +* struct kbase_fence_cb - Mali dma-fence callback data struct +* @fence_cb: Callback function +* @katom: Pointer to katom that is waiting on this callback +* @fence: Pointer to the fence object on which this callback is waiting +* @node: List head for linking this callback to the katom +*/ +struct kbase_fence_cb { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence_cb fence_cb; + struct fence *fence; +#else + struct dma_fence_cb fence_cb; + struct dma_fence *fence; +#endif + struct kbase_jd_atom *katom; + struct list_head node; +}; + +/** + * kbase_fence_out_new() - Creates a new output fence and puts it on the atom + * @katom: Atom to create an output fence for + * + * return: A new fence object on success, NULL on failure. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +struct fence *kbase_fence_out_new(struct kbase_jd_atom *katom); +#else +struct dma_fence *kbase_fence_out_new(struct kbase_jd_atom *katom); +#endif + +#if defined(CONFIG_SYNC_FILE) +/** + * kbase_fence_fence_in_set() - Assign input fence to atom + * @katom: Atom to assign input fence to + * @fence: Input fence to assign to atom + * + * This function will take ownership of one fence reference! + */ +#define kbase_fence_fence_in_set(katom, fence) \ + do { \ + WARN_ON((katom)->dma_fence.fence_in); \ + (katom)->dma_fence.fence_in = fence; \ + } while (0) +#endif + +/** + * kbase_fence_out_remove() - Removes the output fence from atom + * @katom: Atom to remove output fence for + * + * This will also release the reference to this fence which the atom keeps + */ +static inline void kbase_fence_out_remove(struct kbase_jd_atom *katom) +{ + if (katom->dma_fence.fence) { + dma_fence_put(katom->dma_fence.fence); + katom->dma_fence.fence = NULL; + } +} + +#if defined(CONFIG_SYNC_FILE) +/** + * kbase_fence_out_remove() - Removes the input fence from atom + * @katom: Atom to remove input fence for + * + * This will also release the reference to this fence which the atom keeps + */ +static inline void kbase_fence_in_remove(struct kbase_jd_atom *katom) +{ + if (katom->dma_fence.fence_in) { + dma_fence_put(katom->dma_fence.fence_in); + katom->dma_fence.fence_in = NULL; + } +} +#endif + +/** + * kbase_fence_out_is_ours() - Check if atom has a valid fence created by us + * @katom: Atom to check output fence for + * + * Return: true if fence exists and is valid, otherwise false + */ +static inline bool kbase_fence_out_is_ours(struct kbase_jd_atom *katom) +{ + return katom->dma_fence.fence && + katom->dma_fence.fence->ops == &kbase_fence_ops; +} + +/** + * kbase_fence_out_signal() - Signal output fence of atom + * @katom: Atom to signal output fence for + * @status: Status to signal with (0 for success, < 0 for error) + * + * Return: 0 on success, < 0 on error + */ +static inline int kbase_fence_out_signal(struct kbase_jd_atom *katom, + int status) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + katom->dma_fence.fence->error = status; +#else + 
katom->dma_fence.fence->status = status; +#endif + return dma_fence_signal(katom->dma_fence.fence); +} + +/** + * kbase_fence_add_callback() - Add callback on @fence to block @katom + * @katom: Pointer to katom that will be blocked by @fence + * @fence: Pointer to fence on which to set up the callback + * @callback: Pointer to function to be called when fence is signaled + * + * Caller needs to hold a reference to @fence when calling this function, and + * the caller is responsible for releasing that reference. An additional + * reference to @fence will be taken when the callback was successfully set up + * and @fence needs to be kept valid until the callback has been called and + * cleanup have been done. + * + * Return: 0 on success: fence was either already signaled, or callback was + * set up. Negative error code is returned on error. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +int kbase_fence_add_callback(struct kbase_jd_atom *katom, + struct fence *fence, + fence_func_t callback); +#else +int kbase_fence_add_callback(struct kbase_jd_atom *katom, + struct dma_fence *fence, + dma_fence_func_t callback); +#endif + +/** + * kbase_fence_dep_count_set() - Set dep_count value on atom to specified value + * @katom: Atom to set dep_count for + * @val: value to set dep_count to + * + * The dep_count is available to the users of this module so that they can + * synchronize completion of the wait with cancellation and adding of more + * callbacks. For instance, a user could do the following: + * + * dep_count set to 1 + * callback #1 added, dep_count is increased to 2 + * callback #1 happens, dep_count decremented to 1 + * since dep_count > 0, no completion is done + * callback #2 is added, dep_count is increased to 2 + * dep_count decremented to 1 + * callback #2 happens, dep_count decremented to 0 + * since dep_count now is zero, completion executes + * + * The dep_count can also be used to make sure that the completion only + * executes once. This is typically done by setting dep_count to -1 for the + * thread that takes on this responsibility. + */ +static inline void +kbase_fence_dep_count_set(struct kbase_jd_atom *katom, int val) +{ + atomic_set(&katom->dma_fence.dep_count, val); +} + +/** + * kbase_fence_dep_count_dec_and_test() - Decrements dep_count + * @katom: Atom to decrement dep_count for + * + * See @kbase_fence_dep_count_set for general description about dep_count + * + * Return: true if value was decremented to zero, otherwise false + */ +static inline bool +kbase_fence_dep_count_dec_and_test(struct kbase_jd_atom *katom) +{ + return atomic_dec_and_test(&katom->dma_fence.dep_count); +} + +/** + * kbase_fence_dep_count_read() - Returns the current dep_count value + * @katom: Pointer to katom + * + * See @kbase_fence_dep_count_set for general description about dep_count + * + * Return: The current dep_count value + */ +static inline int kbase_fence_dep_count_read(struct kbase_jd_atom *katom) +{ + return atomic_read(&katom->dma_fence.dep_count); +} + +/** + * kbase_fence_free_callbacks() - Free dma-fence callbacks on a katom + * @katom: Pointer to katom + * + * This function will free all fence callbacks on the katom's list of + * callbacks. Callbacks that have not yet been called, because their fence + * hasn't yet signaled, will first be removed from the fence. + * + * Locking: katom->dma_fence.callbacks list assumes jctx.lock is held. + * + * Return: true if dep_count reached 0, otherwise false. 
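+ *
+ * A minimal sketch of how a caller might use the return value under the
+ * locking rule above (my_complete_atom() is illustrative only, not part of
+ * this API):
+ *
+ *   mutex_lock(&katom->kctx->jctx.lock);
+ *   if (kbase_fence_free_callbacks(katom))
+ *       my_complete_atom(katom);   (dep_count reached 0 while freeing)
+ *   mutex_unlock(&katom->kctx->jctx.lock);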
+ */ +bool kbase_fence_free_callbacks(struct kbase_jd_atom *katom); + +#if defined(CONFIG_SYNC_FILE) +/** + * kbase_fence_in_get() - Retrieve input fence for atom. + * @katom: Atom to get input fence from + * + * A ref will be taken for the fence, so use @kbase_fence_put() to release it + * + * Return: The fence, or NULL if there is no input fence for atom + */ +#define kbase_fence_in_get(katom) dma_fence_get((katom)->dma_fence.fence_in) +#endif + +/** + * kbase_fence_out_get() - Retrieve output fence for atom. + * @katom: Atom to get output fence from + * + * A ref will be taken for the fence, so use @kbase_fence_put() to release it + * + * Return: The fence, or NULL if there is no output fence for atom + */ +#define kbase_fence_out_get(katom) dma_fence_get((katom)->dma_fence.fence) + +/** + * kbase_fence_put() - Releases a reference to a fence + * @fence: Fence to release reference for. + */ +#define kbase_fence_put(fence) dma_fence_put(fence) + + +#endif /* CONFIG_MALI_DMA_FENCE || defined(CONFIG_SYNC_FILE */ + +#endif /* _KBASE_FENCE_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h new file mode 100644 index 00000000000000..9e027fc11bd540 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h @@ -0,0 +1,58 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_FENCE_DEFS_H_ +#define _KBASE_FENCE_DEFS_H_ + +/* + * There was a big rename in the 4.10 kernel (fence* -> dma_fence*) + * This file hides the compatibility issues with this for the rest the driver + */ + +#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE) + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + +#include + +#define dma_fence_context_alloc(a) fence_context_alloc(a) +#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e) +#define dma_fence_get(a) fence_get(a) +#define dma_fence_put(a) fence_put(a) +#define dma_fence_signal(a) fence_signal(a) +#define dma_fence_is_signaled(a) fence_is_signaled(a) +#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c) +#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b) +#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0) + +#else + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +#define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \ + (a)->status ?: 1 \ + : 0) +#endif + +#endif /* < 4.10.0 */ + +#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */ + +#endif /* _KBASE_FENCE_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator.h b/drivers/gpu/arm/midgard/mali_kbase_gator.h new file mode 100644 index 00000000000000..ce65b5562a2b53 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator.h @@ -0,0 +1,45 @@ +/* + * + * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* NB taken from gator */ +/* + * List of possible actions to be controlled by DS-5 Streamline. + * The following numbers are used by gator to control the frame buffer dumping + * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because + * they are unknown inside gator. + */ +#ifndef _KBASE_GATOR_H_ +#define _KBASE_GATOR_H_ + +#ifdef CONFIG_MALI_GATOR_SUPPORT +#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16)) +#define GATOR_JOB_SLOT_START 1 +#define GATOR_JOB_SLOT_STOP 2 +#define GATOR_JOB_SLOT_SOFT_STOPPED 3 + +void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id); +void kbase_trace_mali_pm_status(u32 event, u64 value); +void kbase_trace_mali_pm_power_off(u32 event, u64 value); +void kbase_trace_mali_pm_power_on(u32 event, u64 value); +void kbase_trace_mali_page_fault_insert_pages(int event, u32 value); +void kbase_trace_mali_mmu_as_in_use(int event); +void kbase_trace_mali_mmu_as_released(int event); +void kbase_trace_mali_total_alloc_pages_change(long long int event); + +#endif /* CONFIG_MALI_GATOR_SUPPORT */ + +#endif /* _KBASE_GATOR_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c new file mode 100644 index 00000000000000..fac4d94a25a022 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c @@ -0,0 +1,338 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include "mali_kbase.h" +#include "mali_kbase_hw.h" +#include "mali_kbase_mem_linux.h" +#include "mali_kbase_gator_api.h" +#include "mali_kbase_gator_hwcnt_names.h" + +#define MALI_MAX_CORES_PER_GROUP 4 +#define MALI_MAX_NUM_BLOCKS_PER_GROUP 8 +#define MALI_COUNTERS_PER_BLOCK 64 +#define MALI_BYTES_PER_COUNTER 4 + +struct kbase_gator_hwcnt_handles { + struct kbase_device *kbdev; + struct kbase_vinstr_client *vinstr_cli; + void *vinstr_buffer; + struct work_struct dump_work; + int dump_complete; + spinlock_t dump_lock; +}; + +static void dump_worker(struct work_struct *work); + +const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters) +{ + const char * const *hardware_counters; + struct kbase_device *kbdev; + uint32_t product_id; + uint32_t count; + + if (!total_counters) + return NULL; + + /* Get the first device - it doesn't matter in this case */ + kbdev = kbase_find_device(-1); + if (!kbdev) + return NULL; + + product_id = kbdev->gpu_props.props.core_props.product_id; + + if (GPU_ID_IS_NEW_FORMAT(product_id)) { + switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) { + case GPU_ID2_PRODUCT_TMIX: + hardware_counters = hardware_counters_mali_tMIx; + count = ARRAY_SIZE(hardware_counters_mali_tMIx); + break; + case GPU_ID2_PRODUCT_THEX: + hardware_counters = hardware_counters_mali_tHEx; + count = ARRAY_SIZE(hardware_counters_mali_tHEx); + break; + case GPU_ID2_PRODUCT_TSIX: + hardware_counters = hardware_counters_mali_tSIx; + count = ARRAY_SIZE(hardware_counters_mali_tSIx); + break; + case GPU_ID2_PRODUCT_TNOX: + hardware_counters = hardware_counters_mali_tNOx; + count = ARRAY_SIZE(hardware_counters_mali_tNOx); + break; + default: + hardware_counters = NULL; + count = 0; + dev_err(kbdev->dev, "Unrecognized product ID: %u\n", + product_id); + break; + } + } else { + switch (product_id) { + /* If we are using a Mali-T60x device */ + case GPU_ID_PI_T60X: + hardware_counters = hardware_counters_mali_t60x; + count = ARRAY_SIZE(hardware_counters_mali_t60x); + break; + /* If we are using a Mali-T62x device */ + case GPU_ID_PI_T62X: + hardware_counters = hardware_counters_mali_t62x; + count = ARRAY_SIZE(hardware_counters_mali_t62x); + break; + /* If we are using a Mali-T72x device */ + case GPU_ID_PI_T72X: + hardware_counters = hardware_counters_mali_t72x; + count = ARRAY_SIZE(hardware_counters_mali_t72x); + break; + /* If we are using a Mali-T76x device */ + case GPU_ID_PI_T76X: + hardware_counters = hardware_counters_mali_t76x; + count = ARRAY_SIZE(hardware_counters_mali_t76x); + break; + /* If we are using a Mali-T82x device */ + case GPU_ID_PI_T82X: + hardware_counters = hardware_counters_mali_t82x; + count = ARRAY_SIZE(hardware_counters_mali_t82x); + break; + /* If we are using a Mali-T83x device */ + case GPU_ID_PI_T83X: + hardware_counters = hardware_counters_mali_t83x; + count = ARRAY_SIZE(hardware_counters_mali_t83x); + break; + /* If we are using a Mali-T86x device */ + case GPU_ID_PI_T86X: + hardware_counters = hardware_counters_mali_t86x; + count = ARRAY_SIZE(hardware_counters_mali_t86x); + break; + /* If we are using a Mali-T88x device */ + case GPU_ID_PI_TFRX: + hardware_counters = hardware_counters_mali_t88x; + count = ARRAY_SIZE(hardware_counters_mali_t88x); + break; + default: + hardware_counters = NULL; + count = 0; + dev_err(kbdev->dev, "Unrecognized product ID: %u\n", + product_id); + break; + } + } + + /* Release the kbdev reference. 
*/ + kbase_release_device(kbdev); + + *total_counters = count; + + /* If we return a string array take a reference on the module (or fail). */ + if (hardware_counters && !try_module_get(THIS_MODULE)) + return NULL; + + return hardware_counters; +} +KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names); + +void kbase_gator_hwcnt_term_names(void) +{ + /* Release the module reference. */ + module_put(THIS_MODULE); +} +KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names); + +struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info) +{ + struct kbase_gator_hwcnt_handles *hand; + struct kbase_uk_hwcnt_reader_setup setup; + uint32_t dump_size = 0, i = 0; + + if (!in_out_info) + return NULL; + + hand = kzalloc(sizeof(*hand), GFP_KERNEL); + if (!hand) + return NULL; + + INIT_WORK(&hand->dump_work, dump_worker); + spin_lock_init(&hand->dump_lock); + + /* Get the first device */ + hand->kbdev = kbase_find_device(-1); + if (!hand->kbdev) + goto free_hand; + + dump_size = kbase_vinstr_dump_size(hand->kbdev); + hand->vinstr_buffer = kzalloc(dump_size, GFP_KERNEL); + if (!hand->vinstr_buffer) + goto release_device; + in_out_info->kernel_dump_buffer = hand->vinstr_buffer; + + in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores; + in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups; + in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id; + + /* If we are using a v4 device (Mali-T6xx or Mali-T72x) */ + if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) { + uint32_t cg, j; + uint64_t core_mask; + + /* There are 8 hardware counters blocks per core group */ + in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * + MALI_MAX_NUM_BLOCKS_PER_GROUP * + in_out_info->nr_core_groups, GFP_KERNEL); + + if (!in_out_info->hwc_layout) + goto free_vinstr_buffer; + + dump_size = in_out_info->nr_core_groups * + MALI_MAX_NUM_BLOCKS_PER_GROUP * + MALI_COUNTERS_PER_BLOCK * + MALI_BYTES_PER_COUNTER; + + for (cg = 0; cg < in_out_info->nr_core_groups; cg++) { + core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask; + + for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) { + if (core_mask & (1u << j)) + in_out_info->hwc_layout[i++] = SHADER_BLOCK; + else + in_out_info->hwc_layout[i++] = RESERVED_BLOCK; + } + + in_out_info->hwc_layout[i++] = TILER_BLOCK; + in_out_info->hwc_layout[i++] = MMU_L2_BLOCK; + + in_out_info->hwc_layout[i++] = RESERVED_BLOCK; + + if (0 == cg) + in_out_info->hwc_layout[i++] = JM_BLOCK; + else + in_out_info->hwc_layout[i++] = RESERVED_BLOCK; + } + /* If we are using any other device */ + } else { + uint32_t nr_l2, nr_sc_bits, j; + uint64_t core_mask; + + nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices; + + core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask; + + nr_sc_bits = fls64(core_mask); + + /* The job manager and tiler sets of counters + * are always present */ + in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL); + + if (!in_out_info->hwc_layout) + goto free_vinstr_buffer; + + dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER; + + in_out_info->hwc_layout[i++] = JM_BLOCK; + in_out_info->hwc_layout[i++] = TILER_BLOCK; + + for (j = 0; j < nr_l2; j++) + in_out_info->hwc_layout[i++] = MMU_L2_BLOCK; + + while (core_mask != 0ull) { + if ((core_mask & 1ull) != 0ull) + in_out_info->hwc_layout[i++] = SHADER_BLOCK; + else + in_out_info->hwc_layout[i++] = RESERVED_BLOCK; + core_mask >>= 1; + } + 
} + + in_out_info->nr_hwc_blocks = i; + in_out_info->size = dump_size; + + setup.jm_bm = in_out_info->bitmask[0]; + setup.tiler_bm = in_out_info->bitmask[1]; + setup.shader_bm = in_out_info->bitmask[2]; + setup.mmu_l2_bm = in_out_info->bitmask[3]; + hand->vinstr_cli = kbase_vinstr_hwcnt_kernel_setup(hand->kbdev->vinstr_ctx, + &setup, hand->vinstr_buffer); + if (!hand->vinstr_cli) { + dev_err(hand->kbdev->dev, "Failed to register gator with vinstr core"); + goto free_layout; + } + + return hand; + +free_layout: + kfree(in_out_info->hwc_layout); + +free_vinstr_buffer: + kfree(hand->vinstr_buffer); + +release_device: + kbase_release_device(hand->kbdev); + +free_hand: + kfree(hand); + return NULL; +} +KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init); + +void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles) +{ + if (in_out_info) + kfree(in_out_info->hwc_layout); + + if (opaque_handles) { + cancel_work_sync(&opaque_handles->dump_work); + kbase_vinstr_detach_client(opaque_handles->vinstr_cli); + kfree(opaque_handles->vinstr_buffer); + kbase_release_device(opaque_handles->kbdev); + kfree(opaque_handles); + } +} +KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term); + +static void dump_worker(struct work_struct *work) +{ + struct kbase_gator_hwcnt_handles *hand; + + hand = container_of(work, struct kbase_gator_hwcnt_handles, dump_work); + if (!kbase_vinstr_hwc_dump(hand->vinstr_cli, + BASE_HWCNT_READER_EVENT_MANUAL)) { + spin_lock_bh(&hand->dump_lock); + hand->dump_complete = 1; + spin_unlock_bh(&hand->dump_lock); + } else { + schedule_work(&hand->dump_work); + } +} + +uint32_t kbase_gator_instr_hwcnt_dump_complete( + struct kbase_gator_hwcnt_handles *opaque_handles, + uint32_t * const success) +{ + + if (opaque_handles && success) { + *success = opaque_handles->dump_complete; + opaque_handles->dump_complete = 0; + return *success; + } + return 0; +} +KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete); + +uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles) +{ + if (opaque_handles) + schedule_work(&opaque_handles->dump_work); + return 0; +} +KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq); diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h new file mode 100644 index 00000000000000..ef9ac0f7b633d9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h @@ -0,0 +1,219 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_GATOR_API_H_ +#define _KBASE_GATOR_API_H_ + +/** + * @brief This file describes the API used by Gator to fetch hardware counters. + */ + +/* This define is used by the gator kernel module compile to select which DDK + * API calling convention to use. If not defined (legacy DDK) gator assumes + * version 1. 
The version to DDK release mapping is: + * Version 1 API: DDK versions r1px, r2px + * Version 2 API: DDK versions r3px, r4px + * Version 3 API: DDK version r5p0 and newer + * + * API Usage + * ========= + * + * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter + * names for the GPU present in this device. + * + * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for + * the counters you want enabled. The enables can all be set for simplicity in + * most use cases, but disabling some will let you minimize bandwidth impact. + * + * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a + * counter context. On successful return the DDK will have populated the + * structure with a variety of useful information. + * + * 4] Call kbase_gator_hwcnt_dump_irq() to queue a non-blocking request for a + * counter dump. If this returns a non-zero value the request has been queued, + * otherwise the driver has been unable to do so (typically because of another + * user of the instrumentation exists concurrently). + * + * 5] Call kbase_gator_hwcnt_dump_complete() to test whether the previously + * requested dump has been succesful. If this returns non-zero the counter dump + * has resolved, but the value of *success must also be tested as the dump + * may have not been successful. If it returns zero the counter dump was + * abandoned due to the device being busy (typically because of another + * user of the instrumentation exists concurrently). + * + * 6] Process the counters stored in the buffer pointed to by ... + * + * kbase_gator_hwcnt_info->kernel_dump_buffer + * + * In pseudo code you can find all of the counters via this approach: + * + * + * hwcnt_info # pointer to kbase_gator_hwcnt_info structure + * hwcnt_name # pointer to name list + * + * u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer + * + * # Iterate over each 64-counter block in this GPU configuration + * for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) { + * hwc_type type = hwcnt_info->hwc_layout[i]; + * + * # Skip reserved type blocks - they contain no counters at all + * if( type == RESERVED_BLOCK ) { + * continue; + * } + * + * size_t name_offset = type * 64; + * size_t data_offset = i * 64; + * + * # Iterate over the names of the counters in this block type + * for( j = 0; j < 64; j++) { + * const char * name = hwcnt_name[name_offset+j]; + * + * # Skip empty name strings - there is no counter here + * if( name[0] == '\0' ) { + * continue; + * } + * + * u32 data = hwcnt_data[data_offset+j]; + * + * printk( "COUNTER: %s DATA: %u\n", name, data ); + * } + * } + * + * + * Note that in most implementations you typically want to either SUM or + * AVERAGE multiple instances of the same counter if, for example, you have + * multiple shader cores or multiple L2 caches. The most sensible view for + * analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU + * counters. + * + * 7] Goto 4, repeating until you want to stop collecting counters. + * + * 8] Release the dump resources by calling kbase_gator_hwcnt_term(). + * + * 9] Release the name table resources by calling + * kbase_gator_hwcnt_term_names(). This function must only be called if + * init_names() returned a non-NULL value. 
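+ *
+ * A compilable C rendering of the pseudo code above, assuming "info" is the
+ * populated struct kbase_gator_hwcnt_info and "names" is the array returned
+ * by kbase_gator_hwcnt_init_names():
+ *
+ *   const uint32_t *data = (const uint32_t *)info->kernel_dump_buffer;
+ *   uint32_t i, j;
+ *
+ *   for (i = 0; i < info->nr_hwc_blocks; i++) {
+ *       enum hwc_type type = info->hwc_layout[i];
+ *
+ *       if (type == RESERVED_BLOCK)
+ *           continue;
+ *
+ *       for (j = 0; j < 64; j++) {
+ *           const char *name = names[type * 64 + j];
+ *
+ *           if (name[0] != '\0')
+ *               printk("COUNTER: %s DATA: %u\n", name, data[i * 64 + j]);
+ *       }
+ *   }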
+ **/ + +#define MALI_DDK_GATOR_API_VERSION 3 + +enum hwc_type { + JM_BLOCK = 0, + TILER_BLOCK, + SHADER_BLOCK, + MMU_L2_BLOCK, + RESERVED_BLOCK +}; + +struct kbase_gator_hwcnt_info { + /* Passed from Gator to kbase */ + + /* the bitmask of enabled hardware counters for each counter block */ + uint16_t bitmask[4]; + + /* Passed from kbase to Gator */ + + /* ptr to counter dump memory */ + void *kernel_dump_buffer; + + /* size of counter dump memory */ + uint32_t size; + + /* the ID of the Mali device */ + uint32_t gpu_id; + + /* the number of shader cores in the GPU */ + uint32_t nr_cores; + + /* the number of core groups */ + uint32_t nr_core_groups; + + /* the memory layout of the performance counters */ + enum hwc_type *hwc_layout; + + /* the total number of hardware couter blocks */ + uint32_t nr_hwc_blocks; +}; + +/** + * @brief Opaque block of Mali data which Gator needs to return to the API later. + */ +struct kbase_gator_hwcnt_handles; + +/** + * @brief Initialize the resources Gator needs for performance profiling. + * + * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the Mali + * specific information that will be returned to Gator. On entry Gator must have populated the + * 'bitmask' field with the counters it wishes to enable for each class of counter block. + * Each entry in the array corresponds to a single counter class based on the "hwc_type" + * enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables + * the first 4 counters in the block, and so on). See the GPU counter array as returned by + * kbase_gator_hwcnt_get_names() for the index values of each counter for the curernt GPU. + * + * @return Pointer to an opaque handle block on success, NULL on error. + */ +extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info); + +/** + * @brief Free all resources once Gator has finished using performance counters. + * + * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the + * Mali specific information that will be returned to Gator. + * @param opaque_handles A wrapper structure for kbase structures. + */ +extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles); + +/** + * @brief Poll whether a counter dump is successful. + * + * @param opaque_handles A wrapper structure for kbase structures. + * @param[out] success Non-zero on success, zero on failure. + * + * @return Zero if the dump is still pending, non-zero if the dump has completed. Note that a + * completed dump may not have dumped succesfully, so the caller must test for both + * a completed and successful dump before processing counters. + */ +extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success); + +/** + * @brief Request the generation of a new counter dump. + * + * @param opaque_handles A wrapper structure for kbase structures. + * + * @return Zero if the hardware device is busy and cannot handle the request, non-zero otherwise. + */ +extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles); + +/** + * @brief This function is used to fetch the names table based on the Mali device in use. + * + * @param[out] total_counters The total number of counters short names in the Mali devices' list. 
+ * + * @return Pointer to an array of strings of length *total_counters. + */ +extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters); + +/** + * @brief This function is used to terminate the use of the names table. + * + * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value. + */ +extern void kbase_gator_hwcnt_term_names(void); + +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h new file mode 100644 index 00000000000000..6fe65306f0b0bc --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h @@ -0,0 +1,2169 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_GATOR_HWCNT_NAMES_H_ +#define _KBASE_GATOR_HWCNT_NAMES_H_ + +/* + * "Short names" for hardware counters used by Streamline. Counters names are + * stored in accordance with their memory layout in the binary counter block + * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block + * of 64 counters, and each GPU implements the same set of "masters" although + * the counters each master exposes within its block of 64 may vary. + * + * Counters which are an empty string are simply "holes" in the counter memory + * where no counter exists. 
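+ *
+ * For example, in the T60x table below "T60x_GPU_ACTIVE" is entry 6 of the
+ * Job Manager block, so its value occupies word 6 of that block in the
+ * counter dump.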
+ */ + +static const char * const hardware_counters_mali_t60x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T60x_MESSAGES_SENT", + "T60x_MESSAGES_RECEIVED", + "T60x_GPU_ACTIVE", + "T60x_IRQ_ACTIVE", + "T60x_JS0_JOBS", + "T60x_JS0_TASKS", + "T60x_JS0_ACTIVE", + "", + "T60x_JS0_WAIT_READ", + "T60x_JS0_WAIT_ISSUE", + "T60x_JS0_WAIT_DEPEND", + "T60x_JS0_WAIT_FINISH", + "T60x_JS1_JOBS", + "T60x_JS1_TASKS", + "T60x_JS1_ACTIVE", + "", + "T60x_JS1_WAIT_READ", + "T60x_JS1_WAIT_ISSUE", + "T60x_JS1_WAIT_DEPEND", + "T60x_JS1_WAIT_FINISH", + "T60x_JS2_JOBS", + "T60x_JS2_TASKS", + "T60x_JS2_ACTIVE", + "", + "T60x_JS2_WAIT_READ", + "T60x_JS2_WAIT_ISSUE", + "T60x_JS2_WAIT_DEPEND", + "T60x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T60x_TI_JOBS_PROCESSED", + "T60x_TI_TRIANGLES", + "T60x_TI_QUADS", + "T60x_TI_POLYGONS", + "T60x_TI_POINTS", + "T60x_TI_LINES", + "T60x_TI_VCACHE_HIT", + "T60x_TI_VCACHE_MISS", + "T60x_TI_FRONT_FACING", + "T60x_TI_BACK_FACING", + "T60x_TI_PRIM_VISIBLE", + "T60x_TI_PRIM_CULLED", + "T60x_TI_PRIM_CLIPPED", + "T60x_TI_LEVEL0", + "T60x_TI_LEVEL1", + "T60x_TI_LEVEL2", + "T60x_TI_LEVEL3", + "T60x_TI_LEVEL4", + "T60x_TI_LEVEL5", + "T60x_TI_LEVEL6", + "T60x_TI_LEVEL7", + "T60x_TI_COMMAND_1", + "T60x_TI_COMMAND_2", + "T60x_TI_COMMAND_3", + "T60x_TI_COMMAND_4", + "T60x_TI_COMMAND_4_7", + "T60x_TI_COMMAND_8_15", + "T60x_TI_COMMAND_16_63", + "T60x_TI_COMMAND_64", + "T60x_TI_COMPRESS_IN", + "T60x_TI_COMPRESS_OUT", + "T60x_TI_COMPRESS_FLUSH", + "T60x_TI_TIMESTAMPS", + "T60x_TI_PCACHE_HIT", + "T60x_TI_PCACHE_MISS", + "T60x_TI_PCACHE_LINE", + "T60x_TI_PCACHE_STALL", + "T60x_TI_WRBUF_HIT", + "T60x_TI_WRBUF_MISS", + "T60x_TI_WRBUF_LINE", + "T60x_TI_WRBUF_PARTIAL", + "T60x_TI_WRBUF_STALL", + "T60x_TI_ACTIVE", + "T60x_TI_LOADING_DESC", + "T60x_TI_INDEX_WAIT", + "T60x_TI_INDEX_RANGE_WAIT", + "T60x_TI_VERTEX_WAIT", + "T60x_TI_PCACHE_WAIT", + "T60x_TI_WRBUF_WAIT", + "T60x_TI_BUS_READ", + "T60x_TI_BUS_WRITE", + "", + "", + "", + "", + "", + "T60x_TI_UTLB_STALL", + "T60x_TI_UTLB_REPLAY_MISS", + "T60x_TI_UTLB_REPLAY_FULL", + "T60x_TI_UTLB_NEW_MISS", + "T60x_TI_UTLB_HIT", + + /* Shader Core */ + "", + "", + "", + "", + "T60x_FRAG_ACTIVE", + "T60x_FRAG_PRIMITIVES", + "T60x_FRAG_PRIMITIVES_DROPPED", + "T60x_FRAG_CYCLES_DESC", + "T60x_FRAG_CYCLES_PLR", + "T60x_FRAG_CYCLES_VERT", + "T60x_FRAG_CYCLES_TRISETUP", + "T60x_FRAG_CYCLES_RAST", + "T60x_FRAG_THREADS", + "T60x_FRAG_DUMMY_THREADS", + "T60x_FRAG_QUADS_RAST", + "T60x_FRAG_QUADS_EZS_TEST", + "T60x_FRAG_QUADS_EZS_KILLED", + "T60x_FRAG_THREADS_LZS_TEST", + "T60x_FRAG_THREADS_LZS_KILLED", + "T60x_FRAG_CYCLES_NO_TILE", + "T60x_FRAG_NUM_TILES", + "T60x_FRAG_TRANS_ELIM", + "T60x_COMPUTE_ACTIVE", + "T60x_COMPUTE_TASKS", + "T60x_COMPUTE_THREADS", + "T60x_COMPUTE_CYCLES_DESC", + "T60x_TRIPIPE_ACTIVE", + "T60x_ARITH_WORDS", + "T60x_ARITH_CYCLES_REG", + "T60x_ARITH_CYCLES_L0", + "T60x_ARITH_FRAG_DEPEND", + "T60x_LS_WORDS", + "T60x_LS_ISSUES", + "T60x_LS_RESTARTS", + "T60x_LS_REISSUES_MISS", + "T60x_LS_REISSUES_VD", + "T60x_LS_REISSUE_ATTRIB_MISS", + "T60x_LS_NO_WB", + "T60x_TEX_WORDS", + "T60x_TEX_BUBBLES", + "T60x_TEX_WORDS_L0", + "T60x_TEX_WORDS_DESC", + "T60x_TEX_ISSUES", + "T60x_TEX_RECIRC_FMISS", + "T60x_TEX_RECIRC_DESC", + "T60x_TEX_RECIRC_MULTI", + "T60x_TEX_RECIRC_PMISS", + "T60x_TEX_RECIRC_CONF", + "T60x_LSC_READ_HITS", + "T60x_LSC_READ_MISSES", + 
"T60x_LSC_WRITE_HITS", + "T60x_LSC_WRITE_MISSES", + "T60x_LSC_ATOMIC_HITS", + "T60x_LSC_ATOMIC_MISSES", + "T60x_LSC_LINE_FETCHES", + "T60x_LSC_DIRTY_LINE", + "T60x_LSC_SNOOPS", + "T60x_AXI_TLB_STALL", + "T60x_AXI_TLB_MISS", + "T60x_AXI_TLB_TRANSACTION", + "T60x_LS_TLB_MISS", + "T60x_LS_TLB_HIT", + "T60x_AXI_BEATS_READ", + "T60x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T60x_MMU_HIT", + "T60x_MMU_NEW_MISS", + "T60x_MMU_REPLAY_FULL", + "T60x_MMU_REPLAY_MISS", + "T60x_MMU_TABLE_WALK", + "", + "", + "", + "", + "", + "", + "", + "T60x_UTLB_HIT", + "T60x_UTLB_NEW_MISS", + "T60x_UTLB_REPLAY_FULL", + "T60x_UTLB_REPLAY_MISS", + "T60x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T60x_L2_EXT_WRITE_BEATS", + "T60x_L2_EXT_READ_BEATS", + "T60x_L2_ANY_LOOKUP", + "T60x_L2_READ_LOOKUP", + "T60x_L2_SREAD_LOOKUP", + "T60x_L2_READ_REPLAY", + "T60x_L2_READ_SNOOP", + "T60x_L2_READ_HIT", + "T60x_L2_CLEAN_MISS", + "T60x_L2_WRITE_LOOKUP", + "T60x_L2_SWRITE_LOOKUP", + "T60x_L2_WRITE_REPLAY", + "T60x_L2_WRITE_SNOOP", + "T60x_L2_WRITE_HIT", + "T60x_L2_EXT_READ_FULL", + "T60x_L2_EXT_READ_HALF", + "T60x_L2_EXT_WRITE_FULL", + "T60x_L2_EXT_WRITE_HALF", + "T60x_L2_EXT_READ", + "T60x_L2_EXT_READ_LINE", + "T60x_L2_EXT_WRITE", + "T60x_L2_EXT_WRITE_LINE", + "T60x_L2_EXT_WRITE_SMALL", + "T60x_L2_EXT_BARRIER", + "T60x_L2_EXT_AR_STALL", + "T60x_L2_EXT_R_BUF_FULL", + "T60x_L2_EXT_RD_BUF_FULL", + "T60x_L2_EXT_R_RAW", + "T60x_L2_EXT_W_STALL", + "T60x_L2_EXT_W_BUF_FULL", + "T60x_L2_EXT_R_W_HAZARD", + "T60x_L2_TAG_HAZARD", + "T60x_L2_SNOOP_FULL", + "T60x_L2_REPLAY_FULL" +}; +static const char * const hardware_counters_mali_t62x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T62x_MESSAGES_SENT", + "T62x_MESSAGES_RECEIVED", + "T62x_GPU_ACTIVE", + "T62x_IRQ_ACTIVE", + "T62x_JS0_JOBS", + "T62x_JS0_TASKS", + "T62x_JS0_ACTIVE", + "", + "T62x_JS0_WAIT_READ", + "T62x_JS0_WAIT_ISSUE", + "T62x_JS0_WAIT_DEPEND", + "T62x_JS0_WAIT_FINISH", + "T62x_JS1_JOBS", + "T62x_JS1_TASKS", + "T62x_JS1_ACTIVE", + "", + "T62x_JS1_WAIT_READ", + "T62x_JS1_WAIT_ISSUE", + "T62x_JS1_WAIT_DEPEND", + "T62x_JS1_WAIT_FINISH", + "T62x_JS2_JOBS", + "T62x_JS2_TASKS", + "T62x_JS2_ACTIVE", + "", + "T62x_JS2_WAIT_READ", + "T62x_JS2_WAIT_ISSUE", + "T62x_JS2_WAIT_DEPEND", + "T62x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T62x_TI_JOBS_PROCESSED", + "T62x_TI_TRIANGLES", + "T62x_TI_QUADS", + "T62x_TI_POLYGONS", + "T62x_TI_POINTS", + "T62x_TI_LINES", + "T62x_TI_VCACHE_HIT", + "T62x_TI_VCACHE_MISS", + "T62x_TI_FRONT_FACING", + "T62x_TI_BACK_FACING", + "T62x_TI_PRIM_VISIBLE", + "T62x_TI_PRIM_CULLED", + "T62x_TI_PRIM_CLIPPED", + "T62x_TI_LEVEL0", + "T62x_TI_LEVEL1", + "T62x_TI_LEVEL2", + "T62x_TI_LEVEL3", + "T62x_TI_LEVEL4", + "T62x_TI_LEVEL5", + "T62x_TI_LEVEL6", + "T62x_TI_LEVEL7", + "T62x_TI_COMMAND_1", + "T62x_TI_COMMAND_2", + "T62x_TI_COMMAND_3", + "T62x_TI_COMMAND_4", + "T62x_TI_COMMAND_5_7", + "T62x_TI_COMMAND_8_15", + "T62x_TI_COMMAND_16_63", + "T62x_TI_COMMAND_64", + "T62x_TI_COMPRESS_IN", + "T62x_TI_COMPRESS_OUT", + "T62x_TI_COMPRESS_FLUSH", + "T62x_TI_TIMESTAMPS", + "T62x_TI_PCACHE_HIT", + "T62x_TI_PCACHE_MISS", + "T62x_TI_PCACHE_LINE", + "T62x_TI_PCACHE_STALL", + "T62x_TI_WRBUF_HIT", + "T62x_TI_WRBUF_MISS", + "T62x_TI_WRBUF_LINE", + "T62x_TI_WRBUF_PARTIAL", + "T62x_TI_WRBUF_STALL", + "T62x_TI_ACTIVE", + 
"T62x_TI_LOADING_DESC", + "T62x_TI_INDEX_WAIT", + "T62x_TI_INDEX_RANGE_WAIT", + "T62x_TI_VERTEX_WAIT", + "T62x_TI_PCACHE_WAIT", + "T62x_TI_WRBUF_WAIT", + "T62x_TI_BUS_READ", + "T62x_TI_BUS_WRITE", + "", + "", + "", + "", + "", + "T62x_TI_UTLB_STALL", + "T62x_TI_UTLB_REPLAY_MISS", + "T62x_TI_UTLB_REPLAY_FULL", + "T62x_TI_UTLB_NEW_MISS", + "T62x_TI_UTLB_HIT", + + /* Shader Core */ + "", + "", + "", + "T62x_SHADER_CORE_ACTIVE", + "T62x_FRAG_ACTIVE", + "T62x_FRAG_PRIMITIVES", + "T62x_FRAG_PRIMITIVES_DROPPED", + "T62x_FRAG_CYCLES_DESC", + "T62x_FRAG_CYCLES_FPKQ_ACTIVE", + "T62x_FRAG_CYCLES_VERT", + "T62x_FRAG_CYCLES_TRISETUP", + "T62x_FRAG_CYCLES_EZS_ACTIVE", + "T62x_FRAG_THREADS", + "T62x_FRAG_DUMMY_THREADS", + "T62x_FRAG_QUADS_RAST", + "T62x_FRAG_QUADS_EZS_TEST", + "T62x_FRAG_QUADS_EZS_KILLED", + "T62x_FRAG_THREADS_LZS_TEST", + "T62x_FRAG_THREADS_LZS_KILLED", + "T62x_FRAG_CYCLES_NO_TILE", + "T62x_FRAG_NUM_TILES", + "T62x_FRAG_TRANS_ELIM", + "T62x_COMPUTE_ACTIVE", + "T62x_COMPUTE_TASKS", + "T62x_COMPUTE_THREADS", + "T62x_COMPUTE_CYCLES_DESC", + "T62x_TRIPIPE_ACTIVE", + "T62x_ARITH_WORDS", + "T62x_ARITH_CYCLES_REG", + "T62x_ARITH_CYCLES_L0", + "T62x_ARITH_FRAG_DEPEND", + "T62x_LS_WORDS", + "T62x_LS_ISSUES", + "T62x_LS_RESTARTS", + "T62x_LS_REISSUES_MISS", + "T62x_LS_REISSUES_VD", + "T62x_LS_REISSUE_ATTRIB_MISS", + "T62x_LS_NO_WB", + "T62x_TEX_WORDS", + "T62x_TEX_BUBBLES", + "T62x_TEX_WORDS_L0", + "T62x_TEX_WORDS_DESC", + "T62x_TEX_ISSUES", + "T62x_TEX_RECIRC_FMISS", + "T62x_TEX_RECIRC_DESC", + "T62x_TEX_RECIRC_MULTI", + "T62x_TEX_RECIRC_PMISS", + "T62x_TEX_RECIRC_CONF", + "T62x_LSC_READ_HITS", + "T62x_LSC_READ_MISSES", + "T62x_LSC_WRITE_HITS", + "T62x_LSC_WRITE_MISSES", + "T62x_LSC_ATOMIC_HITS", + "T62x_LSC_ATOMIC_MISSES", + "T62x_LSC_LINE_FETCHES", + "T62x_LSC_DIRTY_LINE", + "T62x_LSC_SNOOPS", + "T62x_AXI_TLB_STALL", + "T62x_AXI_TLB_MISS", + "T62x_AXI_TLB_TRANSACTION", + "T62x_LS_TLB_MISS", + "T62x_LS_TLB_HIT", + "T62x_AXI_BEATS_READ", + "T62x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T62x_MMU_HIT", + "T62x_MMU_NEW_MISS", + "T62x_MMU_REPLAY_FULL", + "T62x_MMU_REPLAY_MISS", + "T62x_MMU_TABLE_WALK", + "", + "", + "", + "", + "", + "", + "", + "T62x_UTLB_HIT", + "T62x_UTLB_NEW_MISS", + "T62x_UTLB_REPLAY_FULL", + "T62x_UTLB_REPLAY_MISS", + "T62x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T62x_L2_EXT_WRITE_BEATS", + "T62x_L2_EXT_READ_BEATS", + "T62x_L2_ANY_LOOKUP", + "T62x_L2_READ_LOOKUP", + "T62x_L2_SREAD_LOOKUP", + "T62x_L2_READ_REPLAY", + "T62x_L2_READ_SNOOP", + "T62x_L2_READ_HIT", + "T62x_L2_CLEAN_MISS", + "T62x_L2_WRITE_LOOKUP", + "T62x_L2_SWRITE_LOOKUP", + "T62x_L2_WRITE_REPLAY", + "T62x_L2_WRITE_SNOOP", + "T62x_L2_WRITE_HIT", + "T62x_L2_EXT_READ_FULL", + "T62x_L2_EXT_READ_HALF", + "T62x_L2_EXT_WRITE_FULL", + "T62x_L2_EXT_WRITE_HALF", + "T62x_L2_EXT_READ", + "T62x_L2_EXT_READ_LINE", + "T62x_L2_EXT_WRITE", + "T62x_L2_EXT_WRITE_LINE", + "T62x_L2_EXT_WRITE_SMALL", + "T62x_L2_EXT_BARRIER", + "T62x_L2_EXT_AR_STALL", + "T62x_L2_EXT_R_BUF_FULL", + "T62x_L2_EXT_RD_BUF_FULL", + "T62x_L2_EXT_R_RAW", + "T62x_L2_EXT_W_STALL", + "T62x_L2_EXT_W_BUF_FULL", + "T62x_L2_EXT_R_W_HAZARD", + "T62x_L2_TAG_HAZARD", + "T62x_L2_SNOOP_FULL", + "T62x_L2_REPLAY_FULL" +}; + +static const char * const hardware_counters_mali_t72x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T72x_GPU_ACTIVE", + "T72x_IRQ_ACTIVE", + "T72x_JS0_JOBS", + "T72x_JS0_TASKS", + "T72x_JS0_ACTIVE", + "T72x_JS1_JOBS", + "T72x_JS1_TASKS", + "T72x_JS1_ACTIVE", + "T72x_JS2_JOBS", + 
"T72x_JS2_TASKS", + "T72x_JS2_ACTIVE", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T72x_TI_JOBS_PROCESSED", + "T72x_TI_TRIANGLES", + "T72x_TI_QUADS", + "T72x_TI_POLYGONS", + "T72x_TI_POINTS", + "T72x_TI_LINES", + "T72x_TI_FRONT_FACING", + "T72x_TI_BACK_FACING", + "T72x_TI_PRIM_VISIBLE", + "T72x_TI_PRIM_CULLED", + "T72x_TI_PRIM_CLIPPED", + "", + "", + "", + "", + "", + "", + "", + "", + "T72x_TI_ACTIVE", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Shader Core */ + "", + "", + "", + "", + "T72x_FRAG_ACTIVE", + "T72x_FRAG_PRIMITIVES", + "T72x_FRAG_PRIMITIVES_DROPPED", + "T72x_FRAG_THREADS", + "T72x_FRAG_DUMMY_THREADS", + "T72x_FRAG_QUADS_RAST", + "T72x_FRAG_QUADS_EZS_TEST", + "T72x_FRAG_QUADS_EZS_KILLED", + "T72x_FRAG_THREADS_LZS_TEST", + "T72x_FRAG_THREADS_LZS_KILLED", + "T72x_FRAG_CYCLES_NO_TILE", + "T72x_FRAG_NUM_TILES", + "T72x_FRAG_TRANS_ELIM", + "T72x_COMPUTE_ACTIVE", + "T72x_COMPUTE_TASKS", + "T72x_COMPUTE_THREADS", + "T72x_TRIPIPE_ACTIVE", + "T72x_ARITH_WORDS", + "T72x_ARITH_CYCLES_REG", + "T72x_LS_WORDS", + "T72x_LS_ISSUES", + "T72x_LS_RESTARTS", + "T72x_LS_REISSUES_MISS", + "T72x_TEX_WORDS", + "T72x_TEX_BUBBLES", + "T72x_TEX_ISSUES", + "T72x_LSC_READ_HITS", + "T72x_LSC_READ_MISSES", + "T72x_LSC_WRITE_HITS", + "T72x_LSC_WRITE_MISSES", + "T72x_LSC_ATOMIC_HITS", + "T72x_LSC_ATOMIC_MISSES", + "T72x_LSC_LINE_FETCHES", + "T72x_LSC_DIRTY_LINE", + "T72x_LSC_SNOOPS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*L2 and MMU */ + "", + "", + "", + "", + "T72x_L2_EXT_WRITE_BEAT", + "T72x_L2_EXT_READ_BEAT", + "T72x_L2_READ_SNOOP", + "T72x_L2_READ_HIT", + "T72x_L2_WRITE_SNOOP", + "T72x_L2_WRITE_HIT", + "T72x_L2_EXT_WRITE_SMALL", + "T72x_L2_EXT_BARRIER", + "T72x_L2_EXT_AR_STALL", + "T72x_L2_EXT_W_STALL", + "T72x_L2_SNOOP_FULL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" +}; + +static const char * const hardware_counters_mali_t76x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T76x_MESSAGES_SENT", + "T76x_MESSAGES_RECEIVED", + "T76x_GPU_ACTIVE", + "T76x_IRQ_ACTIVE", + "T76x_JS0_JOBS", + "T76x_JS0_TASKS", + "T76x_JS0_ACTIVE", + "", + "T76x_JS0_WAIT_READ", + "T76x_JS0_WAIT_ISSUE", + "T76x_JS0_WAIT_DEPEND", + "T76x_JS0_WAIT_FINISH", + "T76x_JS1_JOBS", + "T76x_JS1_TASKS", + "T76x_JS1_ACTIVE", + "", + "T76x_JS1_WAIT_READ", + "T76x_JS1_WAIT_ISSUE", + "T76x_JS1_WAIT_DEPEND", + "T76x_JS1_WAIT_FINISH", + "T76x_JS2_JOBS", + "T76x_JS2_TASKS", + "T76x_JS2_ACTIVE", + "", + "T76x_JS2_WAIT_READ", + "T76x_JS2_WAIT_ISSUE", + "T76x_JS2_WAIT_DEPEND", + "T76x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + 
/*Tiler */ + "", + "", + "", + "T76x_TI_JOBS_PROCESSED", + "T76x_TI_TRIANGLES", + "T76x_TI_QUADS", + "T76x_TI_POLYGONS", + "T76x_TI_POINTS", + "T76x_TI_LINES", + "T76x_TI_VCACHE_HIT", + "T76x_TI_VCACHE_MISS", + "T76x_TI_FRONT_FACING", + "T76x_TI_BACK_FACING", + "T76x_TI_PRIM_VISIBLE", + "T76x_TI_PRIM_CULLED", + "T76x_TI_PRIM_CLIPPED", + "T76x_TI_LEVEL0", + "T76x_TI_LEVEL1", + "T76x_TI_LEVEL2", + "T76x_TI_LEVEL3", + "T76x_TI_LEVEL4", + "T76x_TI_LEVEL5", + "T76x_TI_LEVEL6", + "T76x_TI_LEVEL7", + "T76x_TI_COMMAND_1", + "T76x_TI_COMMAND_2", + "T76x_TI_COMMAND_3", + "T76x_TI_COMMAND_4", + "T76x_TI_COMMAND_5_7", + "T76x_TI_COMMAND_8_15", + "T76x_TI_COMMAND_16_63", + "T76x_TI_COMMAND_64", + "T76x_TI_COMPRESS_IN", + "T76x_TI_COMPRESS_OUT", + "T76x_TI_COMPRESS_FLUSH", + "T76x_TI_TIMESTAMPS", + "T76x_TI_PCACHE_HIT", + "T76x_TI_PCACHE_MISS", + "T76x_TI_PCACHE_LINE", + "T76x_TI_PCACHE_STALL", + "T76x_TI_WRBUF_HIT", + "T76x_TI_WRBUF_MISS", + "T76x_TI_WRBUF_LINE", + "T76x_TI_WRBUF_PARTIAL", + "T76x_TI_WRBUF_STALL", + "T76x_TI_ACTIVE", + "T76x_TI_LOADING_DESC", + "T76x_TI_INDEX_WAIT", + "T76x_TI_INDEX_RANGE_WAIT", + "T76x_TI_VERTEX_WAIT", + "T76x_TI_PCACHE_WAIT", + "T76x_TI_WRBUF_WAIT", + "T76x_TI_BUS_READ", + "T76x_TI_BUS_WRITE", + "", + "", + "", + "", + "", + "T76x_TI_UTLB_HIT", + "T76x_TI_UTLB_NEW_MISS", + "T76x_TI_UTLB_REPLAY_FULL", + "T76x_TI_UTLB_REPLAY_MISS", + "T76x_TI_UTLB_STALL", + + /* Shader Core */ + "", + "", + "", + "", + "T76x_FRAG_ACTIVE", + "T76x_FRAG_PRIMITIVES", + "T76x_FRAG_PRIMITIVES_DROPPED", + "T76x_FRAG_CYCLES_DESC", + "T76x_FRAG_CYCLES_FPKQ_ACTIVE", + "T76x_FRAG_CYCLES_VERT", + "T76x_FRAG_CYCLES_TRISETUP", + "T76x_FRAG_CYCLES_EZS_ACTIVE", + "T76x_FRAG_THREADS", + "T76x_FRAG_DUMMY_THREADS", + "T76x_FRAG_QUADS_RAST", + "T76x_FRAG_QUADS_EZS_TEST", + "T76x_FRAG_QUADS_EZS_KILLED", + "T76x_FRAG_THREADS_LZS_TEST", + "T76x_FRAG_THREADS_LZS_KILLED", + "T76x_FRAG_CYCLES_NO_TILE", + "T76x_FRAG_NUM_TILES", + "T76x_FRAG_TRANS_ELIM", + "T76x_COMPUTE_ACTIVE", + "T76x_COMPUTE_TASKS", + "T76x_COMPUTE_THREADS", + "T76x_COMPUTE_CYCLES_DESC", + "T76x_TRIPIPE_ACTIVE", + "T76x_ARITH_WORDS", + "T76x_ARITH_CYCLES_REG", + "T76x_ARITH_CYCLES_L0", + "T76x_ARITH_FRAG_DEPEND", + "T76x_LS_WORDS", + "T76x_LS_ISSUES", + "T76x_LS_REISSUE_ATTR", + "T76x_LS_REISSUES_VARY", + "T76x_LS_VARY_RV_MISS", + "T76x_LS_VARY_RV_HIT", + "T76x_LS_NO_UNPARK", + "T76x_TEX_WORDS", + "T76x_TEX_BUBBLES", + "T76x_TEX_WORDS_L0", + "T76x_TEX_WORDS_DESC", + "T76x_TEX_ISSUES", + "T76x_TEX_RECIRC_FMISS", + "T76x_TEX_RECIRC_DESC", + "T76x_TEX_RECIRC_MULTI", + "T76x_TEX_RECIRC_PMISS", + "T76x_TEX_RECIRC_CONF", + "T76x_LSC_READ_HITS", + "T76x_LSC_READ_OP", + "T76x_LSC_WRITE_HITS", + "T76x_LSC_WRITE_OP", + "T76x_LSC_ATOMIC_HITS", + "T76x_LSC_ATOMIC_OP", + "T76x_LSC_LINE_FETCHES", + "T76x_LSC_DIRTY_LINE", + "T76x_LSC_SNOOPS", + "T76x_AXI_TLB_STALL", + "T76x_AXI_TLB_MISS", + "T76x_AXI_TLB_TRANSACTION", + "T76x_LS_TLB_MISS", + "T76x_LS_TLB_HIT", + "T76x_AXI_BEATS_READ", + "T76x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T76x_MMU_HIT", + "T76x_MMU_NEW_MISS", + "T76x_MMU_REPLAY_FULL", + "T76x_MMU_REPLAY_MISS", + "T76x_MMU_TABLE_WALK", + "T76x_MMU_REQUESTS", + "", + "", + "T76x_UTLB_HIT", + "T76x_UTLB_NEW_MISS", + "T76x_UTLB_REPLAY_FULL", + "T76x_UTLB_REPLAY_MISS", + "T76x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T76x_L2_EXT_WRITE_BEATS", + "T76x_L2_EXT_READ_BEATS", + "T76x_L2_ANY_LOOKUP", + "T76x_L2_READ_LOOKUP", + "T76x_L2_SREAD_LOOKUP", + "T76x_L2_READ_REPLAY", + 
"T76x_L2_READ_SNOOP", + "T76x_L2_READ_HIT", + "T76x_L2_CLEAN_MISS", + "T76x_L2_WRITE_LOOKUP", + "T76x_L2_SWRITE_LOOKUP", + "T76x_L2_WRITE_REPLAY", + "T76x_L2_WRITE_SNOOP", + "T76x_L2_WRITE_HIT", + "T76x_L2_EXT_READ_FULL", + "", + "T76x_L2_EXT_WRITE_FULL", + "T76x_L2_EXT_R_W_HAZARD", + "T76x_L2_EXT_READ", + "T76x_L2_EXT_READ_LINE", + "T76x_L2_EXT_WRITE", + "T76x_L2_EXT_WRITE_LINE", + "T76x_L2_EXT_WRITE_SMALL", + "T76x_L2_EXT_BARRIER", + "T76x_L2_EXT_AR_STALL", + "T76x_L2_EXT_R_BUF_FULL", + "T76x_L2_EXT_RD_BUF_FULL", + "T76x_L2_EXT_R_RAW", + "T76x_L2_EXT_W_STALL", + "T76x_L2_EXT_W_BUF_FULL", + "T76x_L2_EXT_R_BUF_FULL", + "T76x_L2_TAG_HAZARD", + "T76x_L2_SNOOP_FULL", + "T76x_L2_REPLAY_FULL" +}; + +static const char * const hardware_counters_mali_t82x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T82x_MESSAGES_SENT", + "T82x_MESSAGES_RECEIVED", + "T82x_GPU_ACTIVE", + "T82x_IRQ_ACTIVE", + "T82x_JS0_JOBS", + "T82x_JS0_TASKS", + "T82x_JS0_ACTIVE", + "", + "T82x_JS0_WAIT_READ", + "T82x_JS0_WAIT_ISSUE", + "T82x_JS0_WAIT_DEPEND", + "T82x_JS0_WAIT_FINISH", + "T82x_JS1_JOBS", + "T82x_JS1_TASKS", + "T82x_JS1_ACTIVE", + "", + "T82x_JS1_WAIT_READ", + "T82x_JS1_WAIT_ISSUE", + "T82x_JS1_WAIT_DEPEND", + "T82x_JS1_WAIT_FINISH", + "T82x_JS2_JOBS", + "T82x_JS2_TASKS", + "T82x_JS2_ACTIVE", + "", + "T82x_JS2_WAIT_READ", + "T82x_JS2_WAIT_ISSUE", + "T82x_JS2_WAIT_DEPEND", + "T82x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T82x_TI_JOBS_PROCESSED", + "T82x_TI_TRIANGLES", + "T82x_TI_QUADS", + "T82x_TI_POLYGONS", + "T82x_TI_POINTS", + "T82x_TI_LINES", + "T82x_TI_FRONT_FACING", + "T82x_TI_BACK_FACING", + "T82x_TI_PRIM_VISIBLE", + "T82x_TI_PRIM_CULLED", + "T82x_TI_PRIM_CLIPPED", + "", + "", + "", + "", + "", + "", + "", + "", + "T82x_TI_ACTIVE", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Shader Core */ + "", + "", + "", + "", + "T82x_FRAG_ACTIVE", + "T82x_FRAG_PRIMITIVES", + "T82x_FRAG_PRIMITIVES_DROPPED", + "T82x_FRAG_CYCLES_DESC", + "T82x_FRAG_CYCLES_FPKQ_ACTIVE", + "T82x_FRAG_CYCLES_VERT", + "T82x_FRAG_CYCLES_TRISETUP", + "T82x_FRAG_CYCLES_EZS_ACTIVE", + "T82x_FRAG_THREADS", + "T82x_FRAG_DUMMY_THREADS", + "T82x_FRAG_QUADS_RAST", + "T82x_FRAG_QUADS_EZS_TEST", + "T82x_FRAG_QUADS_EZS_KILLED", + "T82x_FRAG_THREADS_LZS_TEST", + "T82x_FRAG_THREADS_LZS_KILLED", + "T82x_FRAG_CYCLES_NO_TILE", + "T82x_FRAG_NUM_TILES", + "T82x_FRAG_TRANS_ELIM", + "T82x_COMPUTE_ACTIVE", + "T82x_COMPUTE_TASKS", + "T82x_COMPUTE_THREADS", + "T82x_COMPUTE_CYCLES_DESC", + "T82x_TRIPIPE_ACTIVE", + "T82x_ARITH_WORDS", + "T82x_ARITH_CYCLES_REG", + "T82x_ARITH_CYCLES_L0", + "T82x_ARITH_FRAG_DEPEND", + "T82x_LS_WORDS", + "T82x_LS_ISSUES", + "T82x_LS_REISSUE_ATTR", + "T82x_LS_REISSUES_VARY", + "T82x_LS_VARY_RV_MISS", + "T82x_LS_VARY_RV_HIT", + "T82x_LS_NO_UNPARK", + "T82x_TEX_WORDS", + "T82x_TEX_BUBBLES", + "T82x_TEX_WORDS_L0", + "T82x_TEX_WORDS_DESC", + "T82x_TEX_ISSUES", + "T82x_TEX_RECIRC_FMISS", + "T82x_TEX_RECIRC_DESC", + "T82x_TEX_RECIRC_MULTI", + "T82x_TEX_RECIRC_PMISS", + "T82x_TEX_RECIRC_CONF", + "T82x_LSC_READ_HITS", + "T82x_LSC_READ_OP", + "T82x_LSC_WRITE_HITS", + "T82x_LSC_WRITE_OP", + "T82x_LSC_ATOMIC_HITS", + "T82x_LSC_ATOMIC_OP", 
+ "T82x_LSC_LINE_FETCHES", + "T82x_LSC_DIRTY_LINE", + "T82x_LSC_SNOOPS", + "T82x_AXI_TLB_STALL", + "T82x_AXI_TLB_MISS", + "T82x_AXI_TLB_TRANSACTION", + "T82x_LS_TLB_MISS", + "T82x_LS_TLB_HIT", + "T82x_AXI_BEATS_READ", + "T82x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T82x_MMU_HIT", + "T82x_MMU_NEW_MISS", + "T82x_MMU_REPLAY_FULL", + "T82x_MMU_REPLAY_MISS", + "T82x_MMU_TABLE_WALK", + "T82x_MMU_REQUESTS", + "", + "", + "T82x_UTLB_HIT", + "T82x_UTLB_NEW_MISS", + "T82x_UTLB_REPLAY_FULL", + "T82x_UTLB_REPLAY_MISS", + "T82x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T82x_L2_EXT_WRITE_BEATS", + "T82x_L2_EXT_READ_BEATS", + "T82x_L2_ANY_LOOKUP", + "T82x_L2_READ_LOOKUP", + "T82x_L2_SREAD_LOOKUP", + "T82x_L2_READ_REPLAY", + "T82x_L2_READ_SNOOP", + "T82x_L2_READ_HIT", + "T82x_L2_CLEAN_MISS", + "T82x_L2_WRITE_LOOKUP", + "T82x_L2_SWRITE_LOOKUP", + "T82x_L2_WRITE_REPLAY", + "T82x_L2_WRITE_SNOOP", + "T82x_L2_WRITE_HIT", + "T82x_L2_EXT_READ_FULL", + "", + "T82x_L2_EXT_WRITE_FULL", + "T82x_L2_EXT_R_W_HAZARD", + "T82x_L2_EXT_READ", + "T82x_L2_EXT_READ_LINE", + "T82x_L2_EXT_WRITE", + "T82x_L2_EXT_WRITE_LINE", + "T82x_L2_EXT_WRITE_SMALL", + "T82x_L2_EXT_BARRIER", + "T82x_L2_EXT_AR_STALL", + "T82x_L2_EXT_R_BUF_FULL", + "T82x_L2_EXT_RD_BUF_FULL", + "T82x_L2_EXT_R_RAW", + "T82x_L2_EXT_W_STALL", + "T82x_L2_EXT_W_BUF_FULL", + "T82x_L2_EXT_R_BUF_FULL", + "T82x_L2_TAG_HAZARD", + "T82x_L2_SNOOP_FULL", + "T82x_L2_REPLAY_FULL" +}; + +static const char * const hardware_counters_mali_t83x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T83x_MESSAGES_SENT", + "T83x_MESSAGES_RECEIVED", + "T83x_GPU_ACTIVE", + "T83x_IRQ_ACTIVE", + "T83x_JS0_JOBS", + "T83x_JS0_TASKS", + "T83x_JS0_ACTIVE", + "", + "T83x_JS0_WAIT_READ", + "T83x_JS0_WAIT_ISSUE", + "T83x_JS0_WAIT_DEPEND", + "T83x_JS0_WAIT_FINISH", + "T83x_JS1_JOBS", + "T83x_JS1_TASKS", + "T83x_JS1_ACTIVE", + "", + "T83x_JS1_WAIT_READ", + "T83x_JS1_WAIT_ISSUE", + "T83x_JS1_WAIT_DEPEND", + "T83x_JS1_WAIT_FINISH", + "T83x_JS2_JOBS", + "T83x_JS2_TASKS", + "T83x_JS2_ACTIVE", + "", + "T83x_JS2_WAIT_READ", + "T83x_JS2_WAIT_ISSUE", + "T83x_JS2_WAIT_DEPEND", + "T83x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T83x_TI_JOBS_PROCESSED", + "T83x_TI_TRIANGLES", + "T83x_TI_QUADS", + "T83x_TI_POLYGONS", + "T83x_TI_POINTS", + "T83x_TI_LINES", + "T83x_TI_FRONT_FACING", + "T83x_TI_BACK_FACING", + "T83x_TI_PRIM_VISIBLE", + "T83x_TI_PRIM_CULLED", + "T83x_TI_PRIM_CLIPPED", + "", + "", + "", + "", + "", + "", + "", + "", + "T83x_TI_ACTIVE", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Shader Core */ + "", + "", + "", + "", + "T83x_FRAG_ACTIVE", + "T83x_FRAG_PRIMITIVES", + "T83x_FRAG_PRIMITIVES_DROPPED", + "T83x_FRAG_CYCLES_DESC", + "T83x_FRAG_CYCLES_FPKQ_ACTIVE", + "T83x_FRAG_CYCLES_VERT", + "T83x_FRAG_CYCLES_TRISETUP", + "T83x_FRAG_CYCLES_EZS_ACTIVE", + "T83x_FRAG_THREADS", + "T83x_FRAG_DUMMY_THREADS", + "T83x_FRAG_QUADS_RAST", + "T83x_FRAG_QUADS_EZS_TEST", + "T83x_FRAG_QUADS_EZS_KILLED", + "T83x_FRAG_THREADS_LZS_TEST", + "T83x_FRAG_THREADS_LZS_KILLED", + "T83x_FRAG_CYCLES_NO_TILE", + "T83x_FRAG_NUM_TILES", + 
"T83x_FRAG_TRANS_ELIM", + "T83x_COMPUTE_ACTIVE", + "T83x_COMPUTE_TASKS", + "T83x_COMPUTE_THREADS", + "T83x_COMPUTE_CYCLES_DESC", + "T83x_TRIPIPE_ACTIVE", + "T83x_ARITH_WORDS", + "T83x_ARITH_CYCLES_REG", + "T83x_ARITH_CYCLES_L0", + "T83x_ARITH_FRAG_DEPEND", + "T83x_LS_WORDS", + "T83x_LS_ISSUES", + "T83x_LS_REISSUE_ATTR", + "T83x_LS_REISSUES_VARY", + "T83x_LS_VARY_RV_MISS", + "T83x_LS_VARY_RV_HIT", + "T83x_LS_NO_UNPARK", + "T83x_TEX_WORDS", + "T83x_TEX_BUBBLES", + "T83x_TEX_WORDS_L0", + "T83x_TEX_WORDS_DESC", + "T83x_TEX_ISSUES", + "T83x_TEX_RECIRC_FMISS", + "T83x_TEX_RECIRC_DESC", + "T83x_TEX_RECIRC_MULTI", + "T83x_TEX_RECIRC_PMISS", + "T83x_TEX_RECIRC_CONF", + "T83x_LSC_READ_HITS", + "T83x_LSC_READ_OP", + "T83x_LSC_WRITE_HITS", + "T83x_LSC_WRITE_OP", + "T83x_LSC_ATOMIC_HITS", + "T83x_LSC_ATOMIC_OP", + "T83x_LSC_LINE_FETCHES", + "T83x_LSC_DIRTY_LINE", + "T83x_LSC_SNOOPS", + "T83x_AXI_TLB_STALL", + "T83x_AXI_TLB_MISS", + "T83x_AXI_TLB_TRANSACTION", + "T83x_LS_TLB_MISS", + "T83x_LS_TLB_HIT", + "T83x_AXI_BEATS_READ", + "T83x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T83x_MMU_HIT", + "T83x_MMU_NEW_MISS", + "T83x_MMU_REPLAY_FULL", + "T83x_MMU_REPLAY_MISS", + "T83x_MMU_TABLE_WALK", + "T83x_MMU_REQUESTS", + "", + "", + "T83x_UTLB_HIT", + "T83x_UTLB_NEW_MISS", + "T83x_UTLB_REPLAY_FULL", + "T83x_UTLB_REPLAY_MISS", + "T83x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T83x_L2_EXT_WRITE_BEATS", + "T83x_L2_EXT_READ_BEATS", + "T83x_L2_ANY_LOOKUP", + "T83x_L2_READ_LOOKUP", + "T83x_L2_SREAD_LOOKUP", + "T83x_L2_READ_REPLAY", + "T83x_L2_READ_SNOOP", + "T83x_L2_READ_HIT", + "T83x_L2_CLEAN_MISS", + "T83x_L2_WRITE_LOOKUP", + "T83x_L2_SWRITE_LOOKUP", + "T83x_L2_WRITE_REPLAY", + "T83x_L2_WRITE_SNOOP", + "T83x_L2_WRITE_HIT", + "T83x_L2_EXT_READ_FULL", + "", + "T83x_L2_EXT_WRITE_FULL", + "T83x_L2_EXT_R_W_HAZARD", + "T83x_L2_EXT_READ", + "T83x_L2_EXT_READ_LINE", + "T83x_L2_EXT_WRITE", + "T83x_L2_EXT_WRITE_LINE", + "T83x_L2_EXT_WRITE_SMALL", + "T83x_L2_EXT_BARRIER", + "T83x_L2_EXT_AR_STALL", + "T83x_L2_EXT_R_BUF_FULL", + "T83x_L2_EXT_RD_BUF_FULL", + "T83x_L2_EXT_R_RAW", + "T83x_L2_EXT_W_STALL", + "T83x_L2_EXT_W_BUF_FULL", + "T83x_L2_EXT_R_BUF_FULL", + "T83x_L2_TAG_HAZARD", + "T83x_L2_SNOOP_FULL", + "T83x_L2_REPLAY_FULL" +}; + +static const char * const hardware_counters_mali_t86x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T86x_MESSAGES_SENT", + "T86x_MESSAGES_RECEIVED", + "T86x_GPU_ACTIVE", + "T86x_IRQ_ACTIVE", + "T86x_JS0_JOBS", + "T86x_JS0_TASKS", + "T86x_JS0_ACTIVE", + "", + "T86x_JS0_WAIT_READ", + "T86x_JS0_WAIT_ISSUE", + "T86x_JS0_WAIT_DEPEND", + "T86x_JS0_WAIT_FINISH", + "T86x_JS1_JOBS", + "T86x_JS1_TASKS", + "T86x_JS1_ACTIVE", + "", + "T86x_JS1_WAIT_READ", + "T86x_JS1_WAIT_ISSUE", + "T86x_JS1_WAIT_DEPEND", + "T86x_JS1_WAIT_FINISH", + "T86x_JS2_JOBS", + "T86x_JS2_TASKS", + "T86x_JS2_ACTIVE", + "", + "T86x_JS2_WAIT_READ", + "T86x_JS2_WAIT_ISSUE", + "T86x_JS2_WAIT_DEPEND", + "T86x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T86x_TI_JOBS_PROCESSED", + "T86x_TI_TRIANGLES", + "T86x_TI_QUADS", + "T86x_TI_POLYGONS", + "T86x_TI_POINTS", + "T86x_TI_LINES", + "T86x_TI_VCACHE_HIT", + "T86x_TI_VCACHE_MISS", + "T86x_TI_FRONT_FACING", + "T86x_TI_BACK_FACING", + "T86x_TI_PRIM_VISIBLE", + "T86x_TI_PRIM_CULLED", + "T86x_TI_PRIM_CLIPPED", + 
"T86x_TI_LEVEL0", + "T86x_TI_LEVEL1", + "T86x_TI_LEVEL2", + "T86x_TI_LEVEL3", + "T86x_TI_LEVEL4", + "T86x_TI_LEVEL5", + "T86x_TI_LEVEL6", + "T86x_TI_LEVEL7", + "T86x_TI_COMMAND_1", + "T86x_TI_COMMAND_2", + "T86x_TI_COMMAND_3", + "T86x_TI_COMMAND_4", + "T86x_TI_COMMAND_5_7", + "T86x_TI_COMMAND_8_15", + "T86x_TI_COMMAND_16_63", + "T86x_TI_COMMAND_64", + "T86x_TI_COMPRESS_IN", + "T86x_TI_COMPRESS_OUT", + "T86x_TI_COMPRESS_FLUSH", + "T86x_TI_TIMESTAMPS", + "T86x_TI_PCACHE_HIT", + "T86x_TI_PCACHE_MISS", + "T86x_TI_PCACHE_LINE", + "T86x_TI_PCACHE_STALL", + "T86x_TI_WRBUF_HIT", + "T86x_TI_WRBUF_MISS", + "T86x_TI_WRBUF_LINE", + "T86x_TI_WRBUF_PARTIAL", + "T86x_TI_WRBUF_STALL", + "T86x_TI_ACTIVE", + "T86x_TI_LOADING_DESC", + "T86x_TI_INDEX_WAIT", + "T86x_TI_INDEX_RANGE_WAIT", + "T86x_TI_VERTEX_WAIT", + "T86x_TI_PCACHE_WAIT", + "T86x_TI_WRBUF_WAIT", + "T86x_TI_BUS_READ", + "T86x_TI_BUS_WRITE", + "", + "", + "", + "", + "", + "T86x_TI_UTLB_HIT", + "T86x_TI_UTLB_NEW_MISS", + "T86x_TI_UTLB_REPLAY_FULL", + "T86x_TI_UTLB_REPLAY_MISS", + "T86x_TI_UTLB_STALL", + + /* Shader Core */ + "", + "", + "", + "", + "T86x_FRAG_ACTIVE", + "T86x_FRAG_PRIMITIVES", + "T86x_FRAG_PRIMITIVES_DROPPED", + "T86x_FRAG_CYCLES_DESC", + "T86x_FRAG_CYCLES_FPKQ_ACTIVE", + "T86x_FRAG_CYCLES_VERT", + "T86x_FRAG_CYCLES_TRISETUP", + "T86x_FRAG_CYCLES_EZS_ACTIVE", + "T86x_FRAG_THREADS", + "T86x_FRAG_DUMMY_THREADS", + "T86x_FRAG_QUADS_RAST", + "T86x_FRAG_QUADS_EZS_TEST", + "T86x_FRAG_QUADS_EZS_KILLED", + "T86x_FRAG_THREADS_LZS_TEST", + "T86x_FRAG_THREADS_LZS_KILLED", + "T86x_FRAG_CYCLES_NO_TILE", + "T86x_FRAG_NUM_TILES", + "T86x_FRAG_TRANS_ELIM", + "T86x_COMPUTE_ACTIVE", + "T86x_COMPUTE_TASKS", + "T86x_COMPUTE_THREADS", + "T86x_COMPUTE_CYCLES_DESC", + "T86x_TRIPIPE_ACTIVE", + "T86x_ARITH_WORDS", + "T86x_ARITH_CYCLES_REG", + "T86x_ARITH_CYCLES_L0", + "T86x_ARITH_FRAG_DEPEND", + "T86x_LS_WORDS", + "T86x_LS_ISSUES", + "T86x_LS_REISSUE_ATTR", + "T86x_LS_REISSUES_VARY", + "T86x_LS_VARY_RV_MISS", + "T86x_LS_VARY_RV_HIT", + "T86x_LS_NO_UNPARK", + "T86x_TEX_WORDS", + "T86x_TEX_BUBBLES", + "T86x_TEX_WORDS_L0", + "T86x_TEX_WORDS_DESC", + "T86x_TEX_ISSUES", + "T86x_TEX_RECIRC_FMISS", + "T86x_TEX_RECIRC_DESC", + "T86x_TEX_RECIRC_MULTI", + "T86x_TEX_RECIRC_PMISS", + "T86x_TEX_RECIRC_CONF", + "T86x_LSC_READ_HITS", + "T86x_LSC_READ_OP", + "T86x_LSC_WRITE_HITS", + "T86x_LSC_WRITE_OP", + "T86x_LSC_ATOMIC_HITS", + "T86x_LSC_ATOMIC_OP", + "T86x_LSC_LINE_FETCHES", + "T86x_LSC_DIRTY_LINE", + "T86x_LSC_SNOOPS", + "T86x_AXI_TLB_STALL", + "T86x_AXI_TLB_MISS", + "T86x_AXI_TLB_TRANSACTION", + "T86x_LS_TLB_MISS", + "T86x_LS_TLB_HIT", + "T86x_AXI_BEATS_READ", + "T86x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T86x_MMU_HIT", + "T86x_MMU_NEW_MISS", + "T86x_MMU_REPLAY_FULL", + "T86x_MMU_REPLAY_MISS", + "T86x_MMU_TABLE_WALK", + "T86x_MMU_REQUESTS", + "", + "", + "T86x_UTLB_HIT", + "T86x_UTLB_NEW_MISS", + "T86x_UTLB_REPLAY_FULL", + "T86x_UTLB_REPLAY_MISS", + "T86x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T86x_L2_EXT_WRITE_BEATS", + "T86x_L2_EXT_READ_BEATS", + "T86x_L2_ANY_LOOKUP", + "T86x_L2_READ_LOOKUP", + "T86x_L2_SREAD_LOOKUP", + "T86x_L2_READ_REPLAY", + "T86x_L2_READ_SNOOP", + "T86x_L2_READ_HIT", + "T86x_L2_CLEAN_MISS", + "T86x_L2_WRITE_LOOKUP", + "T86x_L2_SWRITE_LOOKUP", + "T86x_L2_WRITE_REPLAY", + "T86x_L2_WRITE_SNOOP", + "T86x_L2_WRITE_HIT", + "T86x_L2_EXT_READ_FULL", + "", + "T86x_L2_EXT_WRITE_FULL", + "T86x_L2_EXT_R_W_HAZARD", + "T86x_L2_EXT_READ", + "T86x_L2_EXT_READ_LINE", + 
"T86x_L2_EXT_WRITE", + "T86x_L2_EXT_WRITE_LINE", + "T86x_L2_EXT_WRITE_SMALL", + "T86x_L2_EXT_BARRIER", + "T86x_L2_EXT_AR_STALL", + "T86x_L2_EXT_R_BUF_FULL", + "T86x_L2_EXT_RD_BUF_FULL", + "T86x_L2_EXT_R_RAW", + "T86x_L2_EXT_W_STALL", + "T86x_L2_EXT_W_BUF_FULL", + "T86x_L2_EXT_R_BUF_FULL", + "T86x_L2_TAG_HAZARD", + "T86x_L2_SNOOP_FULL", + "T86x_L2_REPLAY_FULL" +}; + +static const char * const hardware_counters_mali_t88x[] = { + /* Job Manager */ + "", + "", + "", + "", + "T88x_MESSAGES_SENT", + "T88x_MESSAGES_RECEIVED", + "T88x_GPU_ACTIVE", + "T88x_IRQ_ACTIVE", + "T88x_JS0_JOBS", + "T88x_JS0_TASKS", + "T88x_JS0_ACTIVE", + "", + "T88x_JS0_WAIT_READ", + "T88x_JS0_WAIT_ISSUE", + "T88x_JS0_WAIT_DEPEND", + "T88x_JS0_WAIT_FINISH", + "T88x_JS1_JOBS", + "T88x_JS1_TASKS", + "T88x_JS1_ACTIVE", + "", + "T88x_JS1_WAIT_READ", + "T88x_JS1_WAIT_ISSUE", + "T88x_JS1_WAIT_DEPEND", + "T88x_JS1_WAIT_FINISH", + "T88x_JS2_JOBS", + "T88x_JS2_TASKS", + "T88x_JS2_ACTIVE", + "", + "T88x_JS2_WAIT_READ", + "T88x_JS2_WAIT_ISSUE", + "T88x_JS2_WAIT_DEPEND", + "T88x_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /*Tiler */ + "", + "", + "", + "T88x_TI_JOBS_PROCESSED", + "T88x_TI_TRIANGLES", + "T88x_TI_QUADS", + "T88x_TI_POLYGONS", + "T88x_TI_POINTS", + "T88x_TI_LINES", + "T88x_TI_VCACHE_HIT", + "T88x_TI_VCACHE_MISS", + "T88x_TI_FRONT_FACING", + "T88x_TI_BACK_FACING", + "T88x_TI_PRIM_VISIBLE", + "T88x_TI_PRIM_CULLED", + "T88x_TI_PRIM_CLIPPED", + "T88x_TI_LEVEL0", + "T88x_TI_LEVEL1", + "T88x_TI_LEVEL2", + "T88x_TI_LEVEL3", + "T88x_TI_LEVEL4", + "T88x_TI_LEVEL5", + "T88x_TI_LEVEL6", + "T88x_TI_LEVEL7", + "T88x_TI_COMMAND_1", + "T88x_TI_COMMAND_2", + "T88x_TI_COMMAND_3", + "T88x_TI_COMMAND_4", + "T88x_TI_COMMAND_5_7", + "T88x_TI_COMMAND_8_15", + "T88x_TI_COMMAND_16_63", + "T88x_TI_COMMAND_64", + "T88x_TI_COMPRESS_IN", + "T88x_TI_COMPRESS_OUT", + "T88x_TI_COMPRESS_FLUSH", + "T88x_TI_TIMESTAMPS", + "T88x_TI_PCACHE_HIT", + "T88x_TI_PCACHE_MISS", + "T88x_TI_PCACHE_LINE", + "T88x_TI_PCACHE_STALL", + "T88x_TI_WRBUF_HIT", + "T88x_TI_WRBUF_MISS", + "T88x_TI_WRBUF_LINE", + "T88x_TI_WRBUF_PARTIAL", + "T88x_TI_WRBUF_STALL", + "T88x_TI_ACTIVE", + "T88x_TI_LOADING_DESC", + "T88x_TI_INDEX_WAIT", + "T88x_TI_INDEX_RANGE_WAIT", + "T88x_TI_VERTEX_WAIT", + "T88x_TI_PCACHE_WAIT", + "T88x_TI_WRBUF_WAIT", + "T88x_TI_BUS_READ", + "T88x_TI_BUS_WRITE", + "", + "", + "", + "", + "", + "T88x_TI_UTLB_HIT", + "T88x_TI_UTLB_NEW_MISS", + "T88x_TI_UTLB_REPLAY_FULL", + "T88x_TI_UTLB_REPLAY_MISS", + "T88x_TI_UTLB_STALL", + + /* Shader Core */ + "", + "", + "", + "", + "T88x_FRAG_ACTIVE", + "T88x_FRAG_PRIMITIVES", + "T88x_FRAG_PRIMITIVES_DROPPED", + "T88x_FRAG_CYCLES_DESC", + "T88x_FRAG_CYCLES_FPKQ_ACTIVE", + "T88x_FRAG_CYCLES_VERT", + "T88x_FRAG_CYCLES_TRISETUP", + "T88x_FRAG_CYCLES_EZS_ACTIVE", + "T88x_FRAG_THREADS", + "T88x_FRAG_DUMMY_THREADS", + "T88x_FRAG_QUADS_RAST", + "T88x_FRAG_QUADS_EZS_TEST", + "T88x_FRAG_QUADS_EZS_KILLED", + "T88x_FRAG_THREADS_LZS_TEST", + "T88x_FRAG_THREADS_LZS_KILLED", + "T88x_FRAG_CYCLES_NO_TILE", + "T88x_FRAG_NUM_TILES", + "T88x_FRAG_TRANS_ELIM", + "T88x_COMPUTE_ACTIVE", + "T88x_COMPUTE_TASKS", + "T88x_COMPUTE_THREADS", + "T88x_COMPUTE_CYCLES_DESC", + "T88x_TRIPIPE_ACTIVE", + "T88x_ARITH_WORDS", + "T88x_ARITH_CYCLES_REG", + "T88x_ARITH_CYCLES_L0", + "T88x_ARITH_FRAG_DEPEND", + "T88x_LS_WORDS", + "T88x_LS_ISSUES", + "T88x_LS_REISSUE_ATTR", + "T88x_LS_REISSUES_VARY", 
+ "T88x_LS_VARY_RV_MISS", + "T88x_LS_VARY_RV_HIT", + "T88x_LS_NO_UNPARK", + "T88x_TEX_WORDS", + "T88x_TEX_BUBBLES", + "T88x_TEX_WORDS_L0", + "T88x_TEX_WORDS_DESC", + "T88x_TEX_ISSUES", + "T88x_TEX_RECIRC_FMISS", + "T88x_TEX_RECIRC_DESC", + "T88x_TEX_RECIRC_MULTI", + "T88x_TEX_RECIRC_PMISS", + "T88x_TEX_RECIRC_CONF", + "T88x_LSC_READ_HITS", + "T88x_LSC_READ_OP", + "T88x_LSC_WRITE_HITS", + "T88x_LSC_WRITE_OP", + "T88x_LSC_ATOMIC_HITS", + "T88x_LSC_ATOMIC_OP", + "T88x_LSC_LINE_FETCHES", + "T88x_LSC_DIRTY_LINE", + "T88x_LSC_SNOOPS", + "T88x_AXI_TLB_STALL", + "T88x_AXI_TLB_MISS", + "T88x_AXI_TLB_TRANSACTION", + "T88x_LS_TLB_MISS", + "T88x_LS_TLB_HIT", + "T88x_AXI_BEATS_READ", + "T88x_AXI_BEATS_WRITTEN", + + /*L2 and MMU */ + "", + "", + "", + "", + "T88x_MMU_HIT", + "T88x_MMU_NEW_MISS", + "T88x_MMU_REPLAY_FULL", + "T88x_MMU_REPLAY_MISS", + "T88x_MMU_TABLE_WALK", + "T88x_MMU_REQUESTS", + "", + "", + "T88x_UTLB_HIT", + "T88x_UTLB_NEW_MISS", + "T88x_UTLB_REPLAY_FULL", + "T88x_UTLB_REPLAY_MISS", + "T88x_UTLB_STALL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "T88x_L2_EXT_WRITE_BEATS", + "T88x_L2_EXT_READ_BEATS", + "T88x_L2_ANY_LOOKUP", + "T88x_L2_READ_LOOKUP", + "T88x_L2_SREAD_LOOKUP", + "T88x_L2_READ_REPLAY", + "T88x_L2_READ_SNOOP", + "T88x_L2_READ_HIT", + "T88x_L2_CLEAN_MISS", + "T88x_L2_WRITE_LOOKUP", + "T88x_L2_SWRITE_LOOKUP", + "T88x_L2_WRITE_REPLAY", + "T88x_L2_WRITE_SNOOP", + "T88x_L2_WRITE_HIT", + "T88x_L2_EXT_READ_FULL", + "", + "T88x_L2_EXT_WRITE_FULL", + "T88x_L2_EXT_R_W_HAZARD", + "T88x_L2_EXT_READ", + "T88x_L2_EXT_READ_LINE", + "T88x_L2_EXT_WRITE", + "T88x_L2_EXT_WRITE_LINE", + "T88x_L2_EXT_WRITE_SMALL", + "T88x_L2_EXT_BARRIER", + "T88x_L2_EXT_AR_STALL", + "T88x_L2_EXT_R_BUF_FULL", + "T88x_L2_EXT_RD_BUF_FULL", + "T88x_L2_EXT_R_RAW", + "T88x_L2_EXT_W_STALL", + "T88x_L2_EXT_W_BUF_FULL", + "T88x_L2_EXT_R_BUF_FULL", + "T88x_L2_TAG_HAZARD", + "T88x_L2_SNOOP_FULL", + "T88x_L2_REPLAY_FULL" +}; + +#include "mali_kbase_gator_hwcnt_names_tmix.h" + +#include "mali_kbase_gator_hwcnt_names_thex.h" + +#include "mali_kbase_gator_hwcnt_names_tsix.h" + +#include "mali_kbase_gator_hwcnt_names_tnox.h" + +#include "mali_kbase_gator_hwcnt_names_tkax.h" + +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h new file mode 100644 index 00000000000000..15fd4efdc6ca24 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h @@ -0,0 +1,291 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * This header was autogenerated, it should not be edited. 
+ */ + +#ifndef _KBASE_GATOR_HWCNT_NAMES_THEX_H_ +#define _KBASE_GATOR_HWCNT_NAMES_THEX_H_ + +static const char * const hardware_counters_mali_tHEx[] = { + /* Performance counters for the Job Manager */ + "", + "", + "", + "", + "THEx_MESSAGES_SENT", + "THEx_MESSAGES_RECEIVED", + "THEx_GPU_ACTIVE", + "THEx_IRQ_ACTIVE", + "THEx_JS0_JOBS", + "THEx_JS0_TASKS", + "THEx_JS0_ACTIVE", + "", + "THEx_JS0_WAIT_READ", + "THEx_JS0_WAIT_ISSUE", + "THEx_JS0_WAIT_DEPEND", + "THEx_JS0_WAIT_FINISH", + "THEx_JS1_JOBS", + "THEx_JS1_TASKS", + "THEx_JS1_ACTIVE", + "", + "THEx_JS1_WAIT_READ", + "THEx_JS1_WAIT_ISSUE", + "THEx_JS1_WAIT_DEPEND", + "THEx_JS1_WAIT_FINISH", + "THEx_JS2_JOBS", + "THEx_JS2_TASKS", + "THEx_JS2_ACTIVE", + "", + "THEx_JS2_WAIT_READ", + "THEx_JS2_WAIT_ISSUE", + "THEx_JS2_WAIT_DEPEND", + "THEx_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Performance counters for the Tiler */ + "", + "", + "", + "", + "THEx_TILER_ACTIVE", + "THEx_JOBS_PROCESSED", + "THEx_TRIANGLES", + "THEx_LINES", + "THEx_POINTS", + "THEx_FRONT_FACING", + "THEx_BACK_FACING", + "THEx_PRIM_VISIBLE", + "THEx_PRIM_CULLED", + "THEx_PRIM_CLIPPED", + "THEx_PRIM_SAT_CULLED", + "THEx_BIN_ALLOC_INIT", + "THEx_BIN_ALLOC_OVERFLOW", + "THEx_BUS_READ", + "", + "THEx_BUS_WRITE", + "THEx_LOADING_DESC", + "THEx_IDVS_POS_SHAD_REQ", + "THEx_IDVS_POS_SHAD_WAIT", + "THEx_IDVS_POS_SHAD_STALL", + "THEx_IDVS_POS_FIFO_FULL", + "THEx_PREFETCH_STALL", + "THEx_VCACHE_HIT", + "THEx_VCACHE_MISS", + "THEx_VCACHE_LINE_WAIT", + "THEx_VFETCH_POS_READ_WAIT", + "THEx_VFETCH_VERTEX_WAIT", + "THEx_VFETCH_STALL", + "THEx_PRIMASSY_STALL", + "THEx_BBOX_GEN_STALL", + "THEx_IDVS_VBU_HIT", + "THEx_IDVS_VBU_MISS", + "THEx_IDVS_VBU_LINE_DEALLOCATE", + "THEx_IDVS_VAR_SHAD_REQ", + "THEx_IDVS_VAR_SHAD_STALL", + "THEx_BINNER_STALL", + "THEx_ITER_STALL", + "THEx_COMPRESS_MISS", + "THEx_COMPRESS_STALL", + "THEx_PCACHE_HIT", + "THEx_PCACHE_MISS", + "THEx_PCACHE_MISS_STALL", + "THEx_PCACHE_EVICT_STALL", + "THEx_PMGR_PTR_WR_STALL", + "THEx_PMGR_PTR_RD_STALL", + "THEx_PMGR_CMD_WR_STALL", + "THEx_WRBUF_ACTIVE", + "THEx_WRBUF_HIT", + "THEx_WRBUF_MISS", + "THEx_WRBUF_NO_FREE_LINE_STALL", + "THEx_WRBUF_NO_AXI_ID_STALL", + "THEx_WRBUF_AXI_STALL", + "", + "", + "", + "THEx_UTLB_TRANS", + "THEx_UTLB_TRANS_HIT", + "THEx_UTLB_TRANS_STALL", + "THEx_UTLB_TRANS_MISS_DELAY", + "THEx_UTLB_MMU_REQ", + + /* Performance counters for the Shader Core */ + "", + "", + "", + "", + "THEx_FRAG_ACTIVE", + "THEx_FRAG_PRIMITIVES", + "THEx_FRAG_PRIM_RAST", + "THEx_FRAG_FPK_ACTIVE", + "THEx_FRAG_STARVING", + "THEx_FRAG_WARPS", + "THEx_FRAG_PARTIAL_WARPS", + "THEx_FRAG_QUADS_RAST", + "THEx_FRAG_QUADS_EZS_TEST", + "THEx_FRAG_QUADS_EZS_UPDATE", + "THEx_FRAG_QUADS_EZS_KILL", + "THEx_FRAG_LZS_TEST", + "THEx_FRAG_LZS_KILL", + "", + "THEx_FRAG_PTILES", + "THEx_FRAG_TRANS_ELIM", + "THEx_QUAD_FPK_KILLER", + "", + "THEx_COMPUTE_ACTIVE", + "THEx_COMPUTE_TASKS", + "THEx_COMPUTE_WARPS", + "THEx_COMPUTE_STARVING", + "THEx_EXEC_CORE_ACTIVE", + "THEx_EXEC_ACTIVE", + "THEx_EXEC_INSTR_COUNT", + "THEx_EXEC_INSTR_DIVERGED", + "THEx_EXEC_INSTR_STARVING", + "THEx_ARITH_INSTR_SINGLE_FMA", + "THEx_ARITH_INSTR_DOUBLE", + "THEx_ARITH_INSTR_MSG", + "THEx_ARITH_INSTR_MSG_ONLY", + "THEx_TEX_INSTR", + "THEx_TEX_INSTR_MIPMAP", + "THEx_TEX_INSTR_COMPRESSED", + "THEx_TEX_INSTR_3D", + "THEx_TEX_INSTR_TRILINEAR", + "THEx_TEX_COORD_ISSUE", + "THEx_TEX_COORD_STALL", + 
"THEx_TEX_STARVE_CACHE", + "THEx_TEX_STARVE_FILTER", + "THEx_LS_MEM_READ_FULL", + "THEx_LS_MEM_READ_SHORT", + "THEx_LS_MEM_WRITE_FULL", + "THEx_LS_MEM_WRITE_SHORT", + "THEx_LS_MEM_ATOMIC", + "THEx_VARY_INSTR", + "THEx_VARY_SLOT_32", + "THEx_VARY_SLOT_16", + "THEx_ATTR_INSTR", + "THEx_ARITH_INSTR_FP_MUL", + "THEx_BEATS_RD_FTC", + "THEx_BEATS_RD_FTC_EXT", + "THEx_BEATS_RD_LSC", + "THEx_BEATS_RD_LSC_EXT", + "THEx_BEATS_RD_TEX", + "THEx_BEATS_RD_TEX_EXT", + "THEx_BEATS_RD_OTHER", + "THEx_BEATS_WR_LSC", + "THEx_BEATS_WR_TIB", + "", + + /* Performance counters for the Memory System */ + "", + "", + "", + "", + "THEx_MMU_REQUESTS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "THEx_L2_RD_MSG_IN", + "THEx_L2_RD_MSG_IN_STALL", + "THEx_L2_WR_MSG_IN", + "THEx_L2_WR_MSG_IN_STALL", + "THEx_L2_SNP_MSG_IN", + "THEx_L2_SNP_MSG_IN_STALL", + "THEx_L2_RD_MSG_OUT", + "THEx_L2_RD_MSG_OUT_STALL", + "THEx_L2_WR_MSG_OUT", + "THEx_L2_ANY_LOOKUP", + "THEx_L2_READ_LOOKUP", + "THEx_L2_WRITE_LOOKUP", + "THEx_L2_EXT_SNOOP_LOOKUP", + "THEx_L2_EXT_READ", + "THEx_L2_EXT_READ_NOSNP", + "THEx_L2_EXT_READ_UNIQUE", + "THEx_L2_EXT_READ_BEATS", + "THEx_L2_EXT_AR_STALL", + "THEx_L2_EXT_AR_CNT_Q1", + "THEx_L2_EXT_AR_CNT_Q2", + "THEx_L2_EXT_AR_CNT_Q3", + "THEx_L2_EXT_RRESP_0_127", + "THEx_L2_EXT_RRESP_128_191", + "THEx_L2_EXT_RRESP_192_255", + "THEx_L2_EXT_RRESP_256_319", + "THEx_L2_EXT_RRESP_320_383", + "THEx_L2_EXT_WRITE", + "THEx_L2_EXT_WRITE_NOSNP_FULL", + "THEx_L2_EXT_WRITE_NOSNP_PTL", + "THEx_L2_EXT_WRITE_SNP_FULL", + "THEx_L2_EXT_WRITE_SNP_PTL", + "THEx_L2_EXT_WRITE_BEATS", + "THEx_L2_EXT_W_STALL", + "THEx_L2_EXT_AW_CNT_Q1", + "THEx_L2_EXT_AW_CNT_Q2", + "THEx_L2_EXT_AW_CNT_Q3", + "THEx_L2_EXT_SNOOP", + "THEx_L2_EXT_SNOOP_STALL", + "THEx_L2_EXT_SNOOP_RESP_CLEAN", + "THEx_L2_EXT_SNOOP_RESP_DATA", + "THEx_L2_EXT_SNOOP_INTERNAL", + "", + "", + "", + "", + "", + "", + "", +}; + +#endif /* _KBASE_GATOR_HWCNT_NAMES_THEX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h new file mode 100644 index 00000000000000..a131d45474019c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h @@ -0,0 +1,291 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * This header was autogenerated, it should not be edited. 
+ */ + +#ifndef _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ +#define _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ + +static const char * const hardware_counters_mali_tKAx[] = { + /* Performance counters for the Job Manager */ + "", + "", + "", + "", + "TKAx_MESSAGES_SENT", + "TKAx_MESSAGES_RECEIVED", + "TKAx_GPU_ACTIVE", + "TKAx_IRQ_ACTIVE", + "TKAx_JS0_JOBS", + "TKAx_JS0_TASKS", + "TKAx_JS0_ACTIVE", + "", + "TKAx_JS0_WAIT_READ", + "TKAx_JS0_WAIT_ISSUE", + "TKAx_JS0_WAIT_DEPEND", + "TKAx_JS0_WAIT_FINISH", + "TKAx_JS1_JOBS", + "TKAx_JS1_TASKS", + "TKAx_JS1_ACTIVE", + "", + "TKAx_JS1_WAIT_READ", + "TKAx_JS1_WAIT_ISSUE", + "TKAx_JS1_WAIT_DEPEND", + "TKAx_JS1_WAIT_FINISH", + "TKAx_JS2_JOBS", + "TKAx_JS2_TASKS", + "TKAx_JS2_ACTIVE", + "", + "TKAx_JS2_WAIT_READ", + "TKAx_JS2_WAIT_ISSUE", + "TKAx_JS2_WAIT_DEPEND", + "TKAx_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Performance counters for the Tiler */ + "", + "", + "", + "", + "TKAx_TILER_ACTIVE", + "TKAx_JOBS_PROCESSED", + "TKAx_TRIANGLES", + "TKAx_LINES", + "TKAx_POINTS", + "TKAx_FRONT_FACING", + "TKAx_BACK_FACING", + "TKAx_PRIM_VISIBLE", + "TKAx_PRIM_CULLED", + "TKAx_PRIM_CLIPPED", + "TKAx_PRIM_SAT_CULLED", + "TKAx_BIN_ALLOC_INIT", + "TKAx_BIN_ALLOC_OVERFLOW", + "TKAx_BUS_READ", + "", + "TKAx_BUS_WRITE", + "TKAx_LOADING_DESC", + "TKAx_IDVS_POS_SHAD_REQ", + "TKAx_IDVS_POS_SHAD_WAIT", + "TKAx_IDVS_POS_SHAD_STALL", + "TKAx_IDVS_POS_FIFO_FULL", + "TKAx_PREFETCH_STALL", + "TKAx_VCACHE_HIT", + "TKAx_VCACHE_MISS", + "TKAx_VCACHE_LINE_WAIT", + "TKAx_VFETCH_POS_READ_WAIT", + "TKAx_VFETCH_VERTEX_WAIT", + "TKAx_VFETCH_STALL", + "TKAx_PRIMASSY_STALL", + "TKAx_BBOX_GEN_STALL", + "TKAx_IDVS_VBU_HIT", + "TKAx_IDVS_VBU_MISS", + "TKAx_IDVS_VBU_LINE_DEALLOCATE", + "TKAx_IDVS_VAR_SHAD_REQ", + "TKAx_IDVS_VAR_SHAD_STALL", + "TKAx_BINNER_STALL", + "TKAx_ITER_STALL", + "TKAx_COMPRESS_MISS", + "TKAx_COMPRESS_STALL", + "TKAx_PCACHE_HIT", + "TKAx_PCACHE_MISS", + "TKAx_PCACHE_MISS_STALL", + "TKAx_PCACHE_EVICT_STALL", + "TKAx_PMGR_PTR_WR_STALL", + "TKAx_PMGR_PTR_RD_STALL", + "TKAx_PMGR_CMD_WR_STALL", + "TKAx_WRBUF_ACTIVE", + "TKAx_WRBUF_HIT", + "TKAx_WRBUF_MISS", + "TKAx_WRBUF_NO_FREE_LINE_STALL", + "TKAx_WRBUF_NO_AXI_ID_STALL", + "TKAx_WRBUF_AXI_STALL", + "", + "", + "", + "TKAx_UTLB_TRANS", + "TKAx_UTLB_TRANS_HIT", + "TKAx_UTLB_TRANS_STALL", + "TKAx_UTLB_TRANS_MISS_DELAY", + "TKAx_UTLB_MMU_REQ", + + /* Performance counters for the Shader Core */ + "", + "", + "", + "", + "TKAx_FRAG_ACTIVE", + "TKAx_FRAG_PRIMITIVES", + "TKAx_FRAG_PRIM_RAST", + "TKAx_FRAG_FPK_ACTIVE", + "TKAx_FRAG_STARVING", + "TKAx_FRAG_WARPS", + "TKAx_FRAG_PARTIAL_WARPS", + "TKAx_FRAG_QUADS_RAST", + "TKAx_FRAG_QUADS_EZS_TEST", + "TKAx_FRAG_QUADS_EZS_UPDATE", + "TKAx_FRAG_QUADS_EZS_KILL", + "TKAx_FRAG_LZS_TEST", + "TKAx_FRAG_LZS_KILL", + "", + "TKAx_FRAG_PTILES", + "TKAx_FRAG_TRANS_ELIM", + "TKAx_QUAD_FPK_KILLER", + "", + "TKAx_COMPUTE_ACTIVE", + "TKAx_COMPUTE_TASKS", + "TKAx_COMPUTE_WARPS", + "TKAx_COMPUTE_STARVING", + "TKAx_EXEC_CORE_ACTIVE", + "TKAx_EXEC_ACTIVE", + "TKAx_EXEC_INSTR_COUNT", + "TKAx_EXEC_INSTR_DIVERGED", + "TKAx_EXEC_INSTR_STARVING", + "TKAx_ARITH_INSTR_SINGLE_FMA", + "TKAx_ARITH_INSTR_DOUBLE", + "TKAx_ARITH_INSTR_MSG", + "TKAx_ARITH_INSTR_MSG_ONLY", + "TKAx_TEX_MSGI_NUM_QUADS", + "TKAx_TEX_DFCH_NUM_PASSES", + "TKAx_TEX_DFCH_NUM_PASSES_MISS", + "TKAx_TEX_DFCH_NUM_PASSES_MIP_MAP", + "TKAx_TEX_TIDX_NUM_SPLIT_MIP_MAP", + 
"TKAx_TEX_TFCH_NUM_LINES_FETCHED", + "TKAx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK", + "TKAx_TEX_TFCH_NUM_OPERATIONS", + "TKAx_TEX_FILT_NUM_OPERATIONS", + "TKAx_LS_MEM_READ_FULL", + "TKAx_LS_MEM_READ_SHORT", + "TKAx_LS_MEM_WRITE_FULL", + "TKAx_LS_MEM_WRITE_SHORT", + "TKAx_LS_MEM_ATOMIC", + "TKAx_VARY_INSTR", + "TKAx_VARY_SLOT_32", + "TKAx_VARY_SLOT_16", + "TKAx_ATTR_INSTR", + "TKAx_ARITH_INSTR_FP_MUL", + "TKAx_BEATS_RD_FTC", + "TKAx_BEATS_RD_FTC_EXT", + "TKAx_BEATS_RD_LSC", + "TKAx_BEATS_RD_LSC_EXT", + "TKAx_BEATS_RD_TEX", + "TKAx_BEATS_RD_TEX_EXT", + "TKAx_BEATS_RD_OTHER", + "TKAx_BEATS_WR_LSC_WB", + "TKAx_BEATS_WR_TIB", + "TKAx_BEATS_WR_LSC_OTHER", + + /* Performance counters for the Memory System */ + "", + "", + "", + "", + "TKAx_MMU_REQUESTS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "TKAx_L2_RD_MSG_IN", + "TKAx_L2_RD_MSG_IN_STALL", + "TKAx_L2_WR_MSG_IN", + "TKAx_L2_WR_MSG_IN_STALL", + "TKAx_L2_SNP_MSG_IN", + "TKAx_L2_SNP_MSG_IN_STALL", + "TKAx_L2_RD_MSG_OUT", + "TKAx_L2_RD_MSG_OUT_STALL", + "TKAx_L2_WR_MSG_OUT", + "TKAx_L2_ANY_LOOKUP", + "TKAx_L2_READ_LOOKUP", + "TKAx_L2_WRITE_LOOKUP", + "TKAx_L2_EXT_SNOOP_LOOKUP", + "TKAx_L2_EXT_READ", + "TKAx_L2_EXT_READ_NOSNP", + "TKAx_L2_EXT_READ_UNIQUE", + "TKAx_L2_EXT_READ_BEATS", + "TKAx_L2_EXT_AR_STALL", + "TKAx_L2_EXT_AR_CNT_Q1", + "TKAx_L2_EXT_AR_CNT_Q2", + "TKAx_L2_EXT_AR_CNT_Q3", + "TKAx_L2_EXT_RRESP_0_127", + "TKAx_L2_EXT_RRESP_128_191", + "TKAx_L2_EXT_RRESP_192_255", + "TKAx_L2_EXT_RRESP_256_319", + "TKAx_L2_EXT_RRESP_320_383", + "TKAx_L2_EXT_WRITE", + "TKAx_L2_EXT_WRITE_NOSNP_FULL", + "TKAx_L2_EXT_WRITE_NOSNP_PTL", + "TKAx_L2_EXT_WRITE_SNP_FULL", + "TKAx_L2_EXT_WRITE_SNP_PTL", + "TKAx_L2_EXT_WRITE_BEATS", + "TKAx_L2_EXT_W_STALL", + "TKAx_L2_EXT_AW_CNT_Q1", + "TKAx_L2_EXT_AW_CNT_Q2", + "TKAx_L2_EXT_AW_CNT_Q3", + "TKAx_L2_EXT_SNOOP", + "TKAx_L2_EXT_SNOOP_STALL", + "TKAx_L2_EXT_SNOOP_RESP_CLEAN", + "TKAx_L2_EXT_SNOOP_RESP_DATA", + "TKAx_L2_EXT_SNOOP_INTERNAL", + "", + "", + "", + "", + "", + "", + "", +}; + +#endif /* _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h new file mode 100644 index 00000000000000..8a215f723570cc --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h @@ -0,0 +1,291 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * This header was autogenerated, it should not be edited. 
+ */ + +#ifndef _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ +#define _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ + +static const char * const hardware_counters_mali_tMIx[] = { + /* Performance counters for the Job Manager */ + "", + "", + "", + "", + "TMIx_MESSAGES_SENT", + "TMIx_MESSAGES_RECEIVED", + "TMIx_GPU_ACTIVE", + "TMIx_IRQ_ACTIVE", + "TMIx_JS0_JOBS", + "TMIx_JS0_TASKS", + "TMIx_JS0_ACTIVE", + "", + "TMIx_JS0_WAIT_READ", + "TMIx_JS0_WAIT_ISSUE", + "TMIx_JS0_WAIT_DEPEND", + "TMIx_JS0_WAIT_FINISH", + "TMIx_JS1_JOBS", + "TMIx_JS1_TASKS", + "TMIx_JS1_ACTIVE", + "", + "TMIx_JS1_WAIT_READ", + "TMIx_JS1_WAIT_ISSUE", + "TMIx_JS1_WAIT_DEPEND", + "TMIx_JS1_WAIT_FINISH", + "TMIx_JS2_JOBS", + "TMIx_JS2_TASKS", + "TMIx_JS2_ACTIVE", + "", + "TMIx_JS2_WAIT_READ", + "TMIx_JS2_WAIT_ISSUE", + "TMIx_JS2_WAIT_DEPEND", + "TMIx_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Performance counters for the Tiler */ + "", + "", + "", + "", + "TMIx_TILER_ACTIVE", + "TMIx_JOBS_PROCESSED", + "TMIx_TRIANGLES", + "TMIx_LINES", + "TMIx_POINTS", + "TMIx_FRONT_FACING", + "TMIx_BACK_FACING", + "TMIx_PRIM_VISIBLE", + "TMIx_PRIM_CULLED", + "TMIx_PRIM_CLIPPED", + "TMIx_PRIM_SAT_CULLED", + "TMIx_BIN_ALLOC_INIT", + "TMIx_BIN_ALLOC_OVERFLOW", + "TMIx_BUS_READ", + "", + "TMIx_BUS_WRITE", + "TMIx_LOADING_DESC", + "TMIx_IDVS_POS_SHAD_REQ", + "TMIx_IDVS_POS_SHAD_WAIT", + "TMIx_IDVS_POS_SHAD_STALL", + "TMIx_IDVS_POS_FIFO_FULL", + "TMIx_PREFETCH_STALL", + "TMIx_VCACHE_HIT", + "TMIx_VCACHE_MISS", + "TMIx_VCACHE_LINE_WAIT", + "TMIx_VFETCH_POS_READ_WAIT", + "TMIx_VFETCH_VERTEX_WAIT", + "TMIx_VFETCH_STALL", + "TMIx_PRIMASSY_STALL", + "TMIx_BBOX_GEN_STALL", + "TMIx_IDVS_VBU_HIT", + "TMIx_IDVS_VBU_MISS", + "TMIx_IDVS_VBU_LINE_DEALLOCATE", + "TMIx_IDVS_VAR_SHAD_REQ", + "TMIx_IDVS_VAR_SHAD_STALL", + "TMIx_BINNER_STALL", + "TMIx_ITER_STALL", + "TMIx_COMPRESS_MISS", + "TMIx_COMPRESS_STALL", + "TMIx_PCACHE_HIT", + "TMIx_PCACHE_MISS", + "TMIx_PCACHE_MISS_STALL", + "TMIx_PCACHE_EVICT_STALL", + "TMIx_PMGR_PTR_WR_STALL", + "TMIx_PMGR_PTR_RD_STALL", + "TMIx_PMGR_CMD_WR_STALL", + "TMIx_WRBUF_ACTIVE", + "TMIx_WRBUF_HIT", + "TMIx_WRBUF_MISS", + "TMIx_WRBUF_NO_FREE_LINE_STALL", + "TMIx_WRBUF_NO_AXI_ID_STALL", + "TMIx_WRBUF_AXI_STALL", + "", + "", + "", + "TMIx_UTLB_TRANS", + "TMIx_UTLB_TRANS_HIT", + "TMIx_UTLB_TRANS_STALL", + "TMIx_UTLB_TRANS_MISS_DELAY", + "TMIx_UTLB_MMU_REQ", + + /* Performance counters for the Shader Core */ + "", + "", + "", + "", + "TMIx_FRAG_ACTIVE", + "TMIx_FRAG_PRIMITIVES", + "TMIx_FRAG_PRIM_RAST", + "TMIx_FRAG_FPK_ACTIVE", + "TMIx_FRAG_STARVING", + "TMIx_FRAG_WARPS", + "TMIx_FRAG_PARTIAL_WARPS", + "TMIx_FRAG_QUADS_RAST", + "TMIx_FRAG_QUADS_EZS_TEST", + "TMIx_FRAG_QUADS_EZS_UPDATE", + "TMIx_FRAG_QUADS_EZS_KILL", + "TMIx_FRAG_LZS_TEST", + "TMIx_FRAG_LZS_KILL", + "", + "TMIx_FRAG_PTILES", + "TMIx_FRAG_TRANS_ELIM", + "TMIx_QUAD_FPK_KILLER", + "", + "TMIx_COMPUTE_ACTIVE", + "TMIx_COMPUTE_TASKS", + "TMIx_COMPUTE_WARPS", + "TMIx_COMPUTE_STARVING", + "TMIx_EXEC_CORE_ACTIVE", + "TMIx_EXEC_ACTIVE", + "TMIx_EXEC_INSTR_COUNT", + "TMIx_EXEC_INSTR_DIVERGED", + "TMIx_EXEC_INSTR_STARVING", + "TMIx_ARITH_INSTR_SINGLE_FMA", + "TMIx_ARITH_INSTR_DOUBLE", + "TMIx_ARITH_INSTR_MSG", + "TMIx_ARITH_INSTR_MSG_ONLY", + "TMIx_TEX_INSTR", + "TMIx_TEX_INSTR_MIPMAP", + "TMIx_TEX_INSTR_COMPRESSED", + "TMIx_TEX_INSTR_3D", + "TMIx_TEX_INSTR_TRILINEAR", + "TMIx_TEX_COORD_ISSUE", + "TMIx_TEX_COORD_STALL", + 
"TMIx_TEX_STARVE_CACHE", + "TMIx_TEX_STARVE_FILTER", + "TMIx_LS_MEM_READ_FULL", + "TMIx_LS_MEM_READ_SHORT", + "TMIx_LS_MEM_WRITE_FULL", + "TMIx_LS_MEM_WRITE_SHORT", + "TMIx_LS_MEM_ATOMIC", + "TMIx_VARY_INSTR", + "TMIx_VARY_SLOT_32", + "TMIx_VARY_SLOT_16", + "TMIx_ATTR_INSTR", + "TMIx_ARITH_INSTR_FP_MUL", + "TMIx_BEATS_RD_FTC", + "TMIx_BEATS_RD_FTC_EXT", + "TMIx_BEATS_RD_LSC", + "TMIx_BEATS_RD_LSC_EXT", + "TMIx_BEATS_RD_TEX", + "TMIx_BEATS_RD_TEX_EXT", + "TMIx_BEATS_RD_OTHER", + "TMIx_BEATS_WR_LSC", + "TMIx_BEATS_WR_TIB", + "", + + /* Performance counters for the Memory System */ + "", + "", + "", + "", + "TMIx_MMU_REQUESTS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "TMIx_L2_RD_MSG_IN", + "TMIx_L2_RD_MSG_IN_STALL", + "TMIx_L2_WR_MSG_IN", + "TMIx_L2_WR_MSG_IN_STALL", + "TMIx_L2_SNP_MSG_IN", + "TMIx_L2_SNP_MSG_IN_STALL", + "TMIx_L2_RD_MSG_OUT", + "TMIx_L2_RD_MSG_OUT_STALL", + "TMIx_L2_WR_MSG_OUT", + "TMIx_L2_ANY_LOOKUP", + "TMIx_L2_READ_LOOKUP", + "TMIx_L2_WRITE_LOOKUP", + "TMIx_L2_EXT_SNOOP_LOOKUP", + "TMIx_L2_EXT_READ", + "TMIx_L2_EXT_READ_NOSNP", + "TMIx_L2_EXT_READ_UNIQUE", + "TMIx_L2_EXT_READ_BEATS", + "TMIx_L2_EXT_AR_STALL", + "TMIx_L2_EXT_AR_CNT_Q1", + "TMIx_L2_EXT_AR_CNT_Q2", + "TMIx_L2_EXT_AR_CNT_Q3", + "TMIx_L2_EXT_RRESP_0_127", + "TMIx_L2_EXT_RRESP_128_191", + "TMIx_L2_EXT_RRESP_192_255", + "TMIx_L2_EXT_RRESP_256_319", + "TMIx_L2_EXT_RRESP_320_383", + "TMIx_L2_EXT_WRITE", + "TMIx_L2_EXT_WRITE_NOSNP_FULL", + "TMIx_L2_EXT_WRITE_NOSNP_PTL", + "TMIx_L2_EXT_WRITE_SNP_FULL", + "TMIx_L2_EXT_WRITE_SNP_PTL", + "TMIx_L2_EXT_WRITE_BEATS", + "TMIx_L2_EXT_W_STALL", + "TMIx_L2_EXT_AW_CNT_Q1", + "TMIx_L2_EXT_AW_CNT_Q2", + "TMIx_L2_EXT_AW_CNT_Q3", + "TMIx_L2_EXT_SNOOP", + "TMIx_L2_EXT_SNOOP_STALL", + "TMIx_L2_EXT_SNOOP_RESP_CLEAN", + "TMIx_L2_EXT_SNOOP_RESP_DATA", + "TMIx_L2_EXT_SNOOP_INTERNAL", + "", + "", + "", + "", + "", + "", + "", +}; + +#endif /* _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h new file mode 100644 index 00000000000000..b6175a56a8bb4c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h @@ -0,0 +1,291 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * This header was autogenerated, it should not be edited. 
+ */ + +#ifndef _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ +#define _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ + +static const char * const hardware_counters_mali_tNOx[] = { + /* Performance counters for the Job Manager */ + "", + "", + "", + "", + "TNOx_MESSAGES_SENT", + "TNOx_MESSAGES_RECEIVED", + "TNOx_GPU_ACTIVE", + "TNOx_IRQ_ACTIVE", + "TNOx_JS0_JOBS", + "TNOx_JS0_TASKS", + "TNOx_JS0_ACTIVE", + "", + "TNOx_JS0_WAIT_READ", + "TNOx_JS0_WAIT_ISSUE", + "TNOx_JS0_WAIT_DEPEND", + "TNOx_JS0_WAIT_FINISH", + "TNOx_JS1_JOBS", + "TNOx_JS1_TASKS", + "TNOx_JS1_ACTIVE", + "", + "TNOx_JS1_WAIT_READ", + "TNOx_JS1_WAIT_ISSUE", + "TNOx_JS1_WAIT_DEPEND", + "TNOx_JS1_WAIT_FINISH", + "TNOx_JS2_JOBS", + "TNOx_JS2_TASKS", + "TNOx_JS2_ACTIVE", + "", + "TNOx_JS2_WAIT_READ", + "TNOx_JS2_WAIT_ISSUE", + "TNOx_JS2_WAIT_DEPEND", + "TNOx_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Performance counters for the Tiler */ + "", + "", + "", + "", + "TNOx_TILER_ACTIVE", + "TNOx_JOBS_PROCESSED", + "TNOx_TRIANGLES", + "TNOx_LINES", + "TNOx_POINTS", + "TNOx_FRONT_FACING", + "TNOx_BACK_FACING", + "TNOx_PRIM_VISIBLE", + "TNOx_PRIM_CULLED", + "TNOx_PRIM_CLIPPED", + "TNOx_PRIM_SAT_CULLED", + "TNOx_BIN_ALLOC_INIT", + "TNOx_BIN_ALLOC_OVERFLOW", + "TNOx_BUS_READ", + "", + "TNOx_BUS_WRITE", + "TNOx_LOADING_DESC", + "TNOx_IDVS_POS_SHAD_REQ", + "TNOx_IDVS_POS_SHAD_WAIT", + "TNOx_IDVS_POS_SHAD_STALL", + "TNOx_IDVS_POS_FIFO_FULL", + "TNOx_PREFETCH_STALL", + "TNOx_VCACHE_HIT", + "TNOx_VCACHE_MISS", + "TNOx_VCACHE_LINE_WAIT", + "TNOx_VFETCH_POS_READ_WAIT", + "TNOx_VFETCH_VERTEX_WAIT", + "TNOx_VFETCH_STALL", + "TNOx_PRIMASSY_STALL", + "TNOx_BBOX_GEN_STALL", + "TNOx_IDVS_VBU_HIT", + "TNOx_IDVS_VBU_MISS", + "TNOx_IDVS_VBU_LINE_DEALLOCATE", + "TNOx_IDVS_VAR_SHAD_REQ", + "TNOx_IDVS_VAR_SHAD_STALL", + "TNOx_BINNER_STALL", + "TNOx_ITER_STALL", + "TNOx_COMPRESS_MISS", + "TNOx_COMPRESS_STALL", + "TNOx_PCACHE_HIT", + "TNOx_PCACHE_MISS", + "TNOx_PCACHE_MISS_STALL", + "TNOx_PCACHE_EVICT_STALL", + "TNOx_PMGR_PTR_WR_STALL", + "TNOx_PMGR_PTR_RD_STALL", + "TNOx_PMGR_CMD_WR_STALL", + "TNOx_WRBUF_ACTIVE", + "TNOx_WRBUF_HIT", + "TNOx_WRBUF_MISS", + "TNOx_WRBUF_NO_FREE_LINE_STALL", + "TNOx_WRBUF_NO_AXI_ID_STALL", + "TNOx_WRBUF_AXI_STALL", + "", + "", + "", + "TNOx_UTLB_TRANS", + "TNOx_UTLB_TRANS_HIT", + "TNOx_UTLB_TRANS_STALL", + "TNOx_UTLB_TRANS_MISS_DELAY", + "TNOx_UTLB_MMU_REQ", + + /* Performance counters for the Shader Core */ + "", + "", + "", + "", + "TNOx_FRAG_ACTIVE", + "TNOx_FRAG_PRIMITIVES", + "TNOx_FRAG_PRIM_RAST", + "TNOx_FRAG_FPK_ACTIVE", + "TNOx_FRAG_STARVING", + "TNOx_FRAG_WARPS", + "TNOx_FRAG_PARTIAL_WARPS", + "TNOx_FRAG_QUADS_RAST", + "TNOx_FRAG_QUADS_EZS_TEST", + "TNOx_FRAG_QUADS_EZS_UPDATE", + "TNOx_FRAG_QUADS_EZS_KILL", + "TNOx_FRAG_LZS_TEST", + "TNOx_FRAG_LZS_KILL", + "", + "TNOx_FRAG_PTILES", + "TNOx_FRAG_TRANS_ELIM", + "TNOx_QUAD_FPK_KILLER", + "", + "TNOx_COMPUTE_ACTIVE", + "TNOx_COMPUTE_TASKS", + "TNOx_COMPUTE_WARPS", + "TNOx_COMPUTE_STARVING", + "TNOx_EXEC_CORE_ACTIVE", + "TNOx_EXEC_ACTIVE", + "TNOx_EXEC_INSTR_COUNT", + "TNOx_EXEC_INSTR_DIVERGED", + "TNOx_EXEC_INSTR_STARVING", + "TNOx_ARITH_INSTR_SINGLE_FMA", + "TNOx_ARITH_INSTR_DOUBLE", + "TNOx_ARITH_INSTR_MSG", + "TNOx_ARITH_INSTR_MSG_ONLY", + "TNOx_TEX_MSGI_NUM_QUADS", + "TNOx_TEX_DFCH_NUM_PASSES", + "TNOx_TEX_DFCH_NUM_PASSES_MISS", + "TNOx_TEX_DFCH_NUM_PASSES_MIP_MAP", + "TNOx_TEX_TIDX_NUM_SPLIT_MIP_MAP", + 
"TNOx_TEX_TFCH_NUM_LINES_FETCHED", + "TNOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK", + "TNOx_TEX_TFCH_NUM_OPERATIONS", + "TNOx_TEX_FILT_NUM_OPERATIONS", + "TNOx_LS_MEM_READ_FULL", + "TNOx_LS_MEM_READ_SHORT", + "TNOx_LS_MEM_WRITE_FULL", + "TNOx_LS_MEM_WRITE_SHORT", + "TNOx_LS_MEM_ATOMIC", + "TNOx_VARY_INSTR", + "TNOx_VARY_SLOT_32", + "TNOx_VARY_SLOT_16", + "TNOx_ATTR_INSTR", + "TNOx_ARITH_INSTR_FP_MUL", + "TNOx_BEATS_RD_FTC", + "TNOx_BEATS_RD_FTC_EXT", + "TNOx_BEATS_RD_LSC", + "TNOx_BEATS_RD_LSC_EXT", + "TNOx_BEATS_RD_TEX", + "TNOx_BEATS_RD_TEX_EXT", + "TNOx_BEATS_RD_OTHER", + "TNOx_BEATS_WR_LSC_WB", + "TNOx_BEATS_WR_TIB", + "TNOx_BEATS_WR_LSC_OTHER", + + /* Performance counters for the Memory System */ + "", + "", + "", + "", + "TNOx_MMU_REQUESTS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "TNOx_L2_RD_MSG_IN", + "TNOx_L2_RD_MSG_IN_STALL", + "TNOx_L2_WR_MSG_IN", + "TNOx_L2_WR_MSG_IN_STALL", + "TNOx_L2_SNP_MSG_IN", + "TNOx_L2_SNP_MSG_IN_STALL", + "TNOx_L2_RD_MSG_OUT", + "TNOx_L2_RD_MSG_OUT_STALL", + "TNOx_L2_WR_MSG_OUT", + "TNOx_L2_ANY_LOOKUP", + "TNOx_L2_READ_LOOKUP", + "TNOx_L2_WRITE_LOOKUP", + "TNOx_L2_EXT_SNOOP_LOOKUP", + "TNOx_L2_EXT_READ", + "TNOx_L2_EXT_READ_NOSNP", + "TNOx_L2_EXT_READ_UNIQUE", + "TNOx_L2_EXT_READ_BEATS", + "TNOx_L2_EXT_AR_STALL", + "TNOx_L2_EXT_AR_CNT_Q1", + "TNOx_L2_EXT_AR_CNT_Q2", + "TNOx_L2_EXT_AR_CNT_Q3", + "TNOx_L2_EXT_RRESP_0_127", + "TNOx_L2_EXT_RRESP_128_191", + "TNOx_L2_EXT_RRESP_192_255", + "TNOx_L2_EXT_RRESP_256_319", + "TNOx_L2_EXT_RRESP_320_383", + "TNOx_L2_EXT_WRITE", + "TNOx_L2_EXT_WRITE_NOSNP_FULL", + "TNOx_L2_EXT_WRITE_NOSNP_PTL", + "TNOx_L2_EXT_WRITE_SNP_FULL", + "TNOx_L2_EXT_WRITE_SNP_PTL", + "TNOx_L2_EXT_WRITE_BEATS", + "TNOx_L2_EXT_W_STALL", + "TNOx_L2_EXT_AW_CNT_Q1", + "TNOx_L2_EXT_AW_CNT_Q2", + "TNOx_L2_EXT_AW_CNT_Q3", + "TNOx_L2_EXT_SNOOP", + "TNOx_L2_EXT_SNOOP_STALL", + "TNOx_L2_EXT_SNOOP_RESP_CLEAN", + "TNOx_L2_EXT_SNOOP_RESP_DATA", + "TNOx_L2_EXT_SNOOP_INTERNAL", + "", + "", + "", + "", + "", + "", + "", +}; + +#endif /* _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h new file mode 100644 index 00000000000000..fb6a1437a1f671 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h @@ -0,0 +1,291 @@ +/* + * + * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * This header was autogenerated, it should not be edited. 
+ */ + +#ifndef _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ +#define _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ + +static const char * const hardware_counters_mali_tSIx[] = { + /* Performance counters for the Job Manager */ + "", + "", + "", + "", + "TSIx_MESSAGES_SENT", + "TSIx_MESSAGES_RECEIVED", + "TSIx_GPU_ACTIVE", + "TSIx_IRQ_ACTIVE", + "TSIx_JS0_JOBS", + "TSIx_JS0_TASKS", + "TSIx_JS0_ACTIVE", + "", + "TSIx_JS0_WAIT_READ", + "TSIx_JS0_WAIT_ISSUE", + "TSIx_JS0_WAIT_DEPEND", + "TSIx_JS0_WAIT_FINISH", + "TSIx_JS1_JOBS", + "TSIx_JS1_TASKS", + "TSIx_JS1_ACTIVE", + "", + "TSIx_JS1_WAIT_READ", + "TSIx_JS1_WAIT_ISSUE", + "TSIx_JS1_WAIT_DEPEND", + "TSIx_JS1_WAIT_FINISH", + "TSIx_JS2_JOBS", + "TSIx_JS2_TASKS", + "TSIx_JS2_ACTIVE", + "", + "TSIx_JS2_WAIT_READ", + "TSIx_JS2_WAIT_ISSUE", + "TSIx_JS2_WAIT_DEPEND", + "TSIx_JS2_WAIT_FINISH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + + /* Performance counters for the Tiler */ + "", + "", + "", + "", + "TSIx_TILER_ACTIVE", + "TSIx_JOBS_PROCESSED", + "TSIx_TRIANGLES", + "TSIx_LINES", + "TSIx_POINTS", + "TSIx_FRONT_FACING", + "TSIx_BACK_FACING", + "TSIx_PRIM_VISIBLE", + "TSIx_PRIM_CULLED", + "TSIx_PRIM_CLIPPED", + "TSIx_PRIM_SAT_CULLED", + "TSIx_BIN_ALLOC_INIT", + "TSIx_BIN_ALLOC_OVERFLOW", + "TSIx_BUS_READ", + "", + "TSIx_BUS_WRITE", + "TSIx_LOADING_DESC", + "TSIx_IDVS_POS_SHAD_REQ", + "TSIx_IDVS_POS_SHAD_WAIT", + "TSIx_IDVS_POS_SHAD_STALL", + "TSIx_IDVS_POS_FIFO_FULL", + "TSIx_PREFETCH_STALL", + "TSIx_VCACHE_HIT", + "TSIx_VCACHE_MISS", + "TSIx_VCACHE_LINE_WAIT", + "TSIx_VFETCH_POS_READ_WAIT", + "TSIx_VFETCH_VERTEX_WAIT", + "TSIx_VFETCH_STALL", + "TSIx_PRIMASSY_STALL", + "TSIx_BBOX_GEN_STALL", + "TSIx_IDVS_VBU_HIT", + "TSIx_IDVS_VBU_MISS", + "TSIx_IDVS_VBU_LINE_DEALLOCATE", + "TSIx_IDVS_VAR_SHAD_REQ", + "TSIx_IDVS_VAR_SHAD_STALL", + "TSIx_BINNER_STALL", + "TSIx_ITER_STALL", + "TSIx_COMPRESS_MISS", + "TSIx_COMPRESS_STALL", + "TSIx_PCACHE_HIT", + "TSIx_PCACHE_MISS", + "TSIx_PCACHE_MISS_STALL", + "TSIx_PCACHE_EVICT_STALL", + "TSIx_PMGR_PTR_WR_STALL", + "TSIx_PMGR_PTR_RD_STALL", + "TSIx_PMGR_CMD_WR_STALL", + "TSIx_WRBUF_ACTIVE", + "TSIx_WRBUF_HIT", + "TSIx_WRBUF_MISS", + "TSIx_WRBUF_NO_FREE_LINE_STALL", + "TSIx_WRBUF_NO_AXI_ID_STALL", + "TSIx_WRBUF_AXI_STALL", + "", + "", + "", + "TSIx_UTLB_TRANS", + "TSIx_UTLB_TRANS_HIT", + "TSIx_UTLB_TRANS_STALL", + "TSIx_UTLB_TRANS_MISS_DELAY", + "TSIx_UTLB_MMU_REQ", + + /* Performance counters for the Shader Core */ + "", + "", + "", + "", + "TSIx_FRAG_ACTIVE", + "TSIx_FRAG_PRIMITIVES", + "TSIx_FRAG_PRIM_RAST", + "TSIx_FRAG_FPK_ACTIVE", + "TSIx_FRAG_STARVING", + "TSIx_FRAG_WARPS", + "TSIx_FRAG_PARTIAL_WARPS", + "TSIx_FRAG_QUADS_RAST", + "TSIx_FRAG_QUADS_EZS_TEST", + "TSIx_FRAG_QUADS_EZS_UPDATE", + "TSIx_FRAG_QUADS_EZS_KILL", + "TSIx_FRAG_LZS_TEST", + "TSIx_FRAG_LZS_KILL", + "", + "TSIx_FRAG_PTILES", + "TSIx_FRAG_TRANS_ELIM", + "TSIx_QUAD_FPK_KILLER", + "", + "TSIx_COMPUTE_ACTIVE", + "TSIx_COMPUTE_TASKS", + "TSIx_COMPUTE_WARPS", + "TSIx_COMPUTE_STARVING", + "TSIx_EXEC_CORE_ACTIVE", + "TSIx_EXEC_ACTIVE", + "TSIx_EXEC_INSTR_COUNT", + "TSIx_EXEC_INSTR_DIVERGED", + "TSIx_EXEC_INSTR_STARVING", + "TSIx_ARITH_INSTR_SINGLE_FMA", + "TSIx_ARITH_INSTR_DOUBLE", + "TSIx_ARITH_INSTR_MSG", + "TSIx_ARITH_INSTR_MSG_ONLY", + "TSIx_TEX_MSGI_NUM_QUADS", + "TSIx_TEX_DFCH_NUM_PASSES", + "TSIx_TEX_DFCH_NUM_PASSES_MISS", + "TSIx_TEX_DFCH_NUM_PASSES_MIP_MAP", + "TSIx_TEX_TIDX_NUM_SPLIT_MIP_MAP", + 
"TSIx_TEX_TFCH_NUM_LINES_FETCHED", + "TSIx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK", + "TSIx_TEX_TFCH_NUM_OPERATIONS", + "TSIx_TEX_FILT_NUM_OPERATIONS", + "TSIx_LS_MEM_READ_FULL", + "TSIx_LS_MEM_READ_SHORT", + "TSIx_LS_MEM_WRITE_FULL", + "TSIx_LS_MEM_WRITE_SHORT", + "TSIx_LS_MEM_ATOMIC", + "TSIx_VARY_INSTR", + "TSIx_VARY_SLOT_32", + "TSIx_VARY_SLOT_16", + "TSIx_ATTR_INSTR", + "TSIx_ARITH_INSTR_FP_MUL", + "TSIx_BEATS_RD_FTC", + "TSIx_BEATS_RD_FTC_EXT", + "TSIx_BEATS_RD_LSC", + "TSIx_BEATS_RD_LSC_EXT", + "TSIx_BEATS_RD_TEX", + "TSIx_BEATS_RD_TEX_EXT", + "TSIx_BEATS_RD_OTHER", + "TSIx_BEATS_WR_LSC", + "TSIx_BEATS_WR_TIB", + "", + + /* Performance counters for the Memory System */ + "", + "", + "", + "", + "TSIx_MMU_REQUESTS", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "TSIx_L2_RD_MSG_IN", + "TSIx_L2_RD_MSG_IN_STALL", + "TSIx_L2_WR_MSG_IN", + "TSIx_L2_WR_MSG_IN_STALL", + "TSIx_L2_SNP_MSG_IN", + "TSIx_L2_SNP_MSG_IN_STALL", + "TSIx_L2_RD_MSG_OUT", + "TSIx_L2_RD_MSG_OUT_STALL", + "TSIx_L2_WR_MSG_OUT", + "TSIx_L2_ANY_LOOKUP", + "TSIx_L2_READ_LOOKUP", + "TSIx_L2_WRITE_LOOKUP", + "TSIx_L2_EXT_SNOOP_LOOKUP", + "TSIx_L2_EXT_READ", + "TSIx_L2_EXT_READ_NOSNP", + "TSIx_L2_EXT_READ_UNIQUE", + "TSIx_L2_EXT_READ_BEATS", + "TSIx_L2_EXT_AR_STALL", + "TSIx_L2_EXT_AR_CNT_Q1", + "TSIx_L2_EXT_AR_CNT_Q2", + "TSIx_L2_EXT_AR_CNT_Q3", + "TSIx_L2_EXT_RRESP_0_127", + "TSIx_L2_EXT_RRESP_128_191", + "TSIx_L2_EXT_RRESP_192_255", + "TSIx_L2_EXT_RRESP_256_319", + "TSIx_L2_EXT_RRESP_320_383", + "TSIx_L2_EXT_WRITE", + "TSIx_L2_EXT_WRITE_NOSNP_FULL", + "TSIx_L2_EXT_WRITE_NOSNP_PTL", + "TSIx_L2_EXT_WRITE_SNP_FULL", + "TSIx_L2_EXT_WRITE_SNP_PTL", + "TSIx_L2_EXT_WRITE_BEATS", + "TSIx_L2_EXT_W_STALL", + "TSIx_L2_EXT_AW_CNT_Q1", + "TSIx_L2_EXT_AW_CNT_Q2", + "TSIx_L2_EXT_AW_CNT_Q3", + "TSIx_L2_EXT_SNOOP", + "TSIx_L2_EXT_SNOOP_STALL", + "TSIx_L2_EXT_SNOOP_RESP_CLEAN", + "TSIx_L2_EXT_SNOOP_RESP_DATA", + "TSIx_L2_EXT_SNOOP_INTERNAL", + "", + "", + "", + "", + "", + "", + "", +}; + +#endif /* _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h new file mode 100644 index 00000000000000..32b95137739e8c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h @@ -0,0 +1,123 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + +#ifndef _KBASE_GPU_ID_H_ +#define _KBASE_GPU_ID_H_ + +/* GPU_ID register */ +#define GPU_ID_VERSION_STATUS_SHIFT 0 +#define GPU_ID_VERSION_MINOR_SHIFT 4 +#define GPU_ID_VERSION_MAJOR_SHIFT 12 +#define GPU_ID_VERSION_PRODUCT_ID_SHIFT 16 +#define GPU_ID_VERSION_STATUS (0xF << GPU_ID_VERSION_STATUS_SHIFT) +#define GPU_ID_VERSION_MINOR (0xFF << GPU_ID_VERSION_MINOR_SHIFT) +#define GPU_ID_VERSION_MAJOR (0xF << GPU_ID_VERSION_MAJOR_SHIFT) +#define GPU_ID_VERSION_PRODUCT_ID (0xFFFF << GPU_ID_VERSION_PRODUCT_ID_SHIFT) + +/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */ +#define GPU_ID_PI_T60X 0x6956 +#define GPU_ID_PI_T62X 0x0620 +#define GPU_ID_PI_T76X 0x0750 +#define GPU_ID_PI_T72X 0x0720 +#define GPU_ID_PI_TFRX 0x0880 +#define GPU_ID_PI_T86X 0x0860 +#define GPU_ID_PI_T82X 0x0820 +#define GPU_ID_PI_T83X 0x0830 + +/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */ +#define GPU_ID_PI_NEW_FORMAT_START 0x1000 +#define GPU_ID_IS_NEW_FORMAT(product_id) ((product_id) != GPU_ID_PI_T60X && \ + (product_id) >= \ + GPU_ID_PI_NEW_FORMAT_START) + +#define GPU_ID2_VERSION_STATUS_SHIFT 0 +#define GPU_ID2_VERSION_MINOR_SHIFT 4 +#define GPU_ID2_VERSION_MAJOR_SHIFT 12 +#define GPU_ID2_PRODUCT_MAJOR_SHIFT 16 +#define GPU_ID2_ARCH_REV_SHIFT 20 +#define GPU_ID2_ARCH_MINOR_SHIFT 24 +#define GPU_ID2_ARCH_MAJOR_SHIFT 28 +#define GPU_ID2_VERSION_STATUS (0xF << GPU_ID2_VERSION_STATUS_SHIFT) +#define GPU_ID2_VERSION_MINOR (0xFF << GPU_ID2_VERSION_MINOR_SHIFT) +#define GPU_ID2_VERSION_MAJOR (0xF << GPU_ID2_VERSION_MAJOR_SHIFT) +#define GPU_ID2_PRODUCT_MAJOR (0xF << GPU_ID2_PRODUCT_MAJOR_SHIFT) +#define GPU_ID2_ARCH_REV (0xF << GPU_ID2_ARCH_REV_SHIFT) +#define GPU_ID2_ARCH_MINOR (0xF << GPU_ID2_ARCH_MINOR_SHIFT) +#define GPU_ID2_ARCH_MAJOR (0xF << GPU_ID2_ARCH_MAJOR_SHIFT) +#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR) +#define GPU_ID2_VERSION (GPU_ID2_VERSION_MAJOR | \ + GPU_ID2_VERSION_MINOR | \ + GPU_ID2_VERSION_STATUS) + +/* Helper macro to create a partial GPU_ID (new format) that defines + a product ignoring its version. */ +#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \ + (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \ + ((arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \ + ((arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \ + ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT)) + +/* Helper macro to create a partial GPU_ID (new format) that specifies the + revision (major, minor, status) of a product */ +#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \ + (((version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \ + ((version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \ + ((version_status) << GPU_ID2_VERSION_STATUS_SHIFT)) + +/* Helper macro to create a complete GPU_ID (new format) */ +#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \ + version_major, version_minor, version_status) \ + (GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \ + product_major) | \ + GPU_ID2_VERSION_MAKE(version_major, version_minor, \ + version_status)) + +/* Helper macro to create a partial GPU_ID (new format) that identifies + a particular GPU model by its arch_major and product_major. */ +#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \ + (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \ + ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT)) + +/* Strip off the non-relevant bits from a product_id value and make it suitable + for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU + model. 
*/ +#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \ + (((product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \ + GPU_ID2_PRODUCT_MODEL) + +#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6u, 0) +#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6u, 1) +#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7u, 0) +#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7u, 3) +#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7u, 1) +#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7u, 2) +#define GPU_ID2_PRODUCT_TKAX GPU_ID2_MODEL_MAKE(8u, 0) +#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(8u, 1) +#define GPU_ID2_PRODUCT_TBOX GPU_ID2_MODEL_MAKE(8u, 2) + +/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */ +#define GPU_ID_S_15DEV0 0x1 +#define GPU_ID_S_EAC 0x2 + +/* Helper macro to create a GPU_ID assuming valid values for id, major, + minor, status */ +#define GPU_ID_MAKE(id, major, minor, status) \ + (((id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \ + ((major) << GPU_ID_VERSION_MAJOR_SHIFT) | \ + ((minor) << GPU_ID_VERSION_MINOR_SHIFT) | \ + ((status) << GPU_ID_VERSION_STATUS_SHIFT)) + +#endif /* _KBASE_GPU_ID_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c new file mode 100644 index 00000000000000..6df0a1cb1264a2 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c @@ -0,0 +1,97 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include + +#ifdef CONFIG_DEBUG_FS +/** Show callback for the @c gpu_memory debugfs file. + * + * This function is called to get the contents of the @c gpu_memory debugfs + * file. This is a report of current gpu memory usage. 
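/*
 * Editorial sketch, not part of the ARM patch above: one way the GPU_ID2_*
 * helpers from mali_kbase_gpu_id.h could be used to check a raw GPU_ID
 * register value against a product model. The helper name and the example
 * value are illustrative assumptions only, not code from this driver; u32 and
 * bool come from <linux/types.h>.
 */
#include <linux/types.h>
#include "mali_kbase_gpu_id.h"

static bool example_gpu_id_is_thex(u32 gpu_id)
{
	/*
	 * GPU_ID2_PRODUCT_MODEL keeps only the arch_major and product_major
	 * fields, which together identify the GPU model.
	 */
	return (gpu_id & GPU_ID2_PRODUCT_MODEL) == GPU_ID2_PRODUCT_THEX;
}

/*
 * For example, GPU_ID2_MAKE(6u, 0, 0, 1, 0, 0, 0) builds an ID for arch major
 * 6, product major 1, version r0p0 with status 0; masking it with
 * GPU_ID2_PRODUCT_MODEL leaves GPU_ID2_MODEL_MAKE(6u, 1), which is
 * GPU_ID2_PRODUCT_THEX, so the helper above would return true for that value.
 */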
+ * + * @param sfile The debugfs entry + * @param data Data associated with the entry + * + * @return 0 if successfully prints data in debugfs entry file + * -1 if it encountered an error + */ + +static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data) +{ + struct list_head *entry; + const struct list_head *kbdev_list; + + kbdev_list = kbase_dev_list_get(); + list_for_each(entry, kbdev_list) { + struct kbase_device *kbdev = NULL; + struct kbasep_kctx_list_element *element; + + kbdev = list_entry(entry, struct kbase_device, entry); + /* output the total memory usage and cap for this device */ + seq_printf(sfile, "%-16s %10u\n", + kbdev->devname, + atomic_read(&(kbdev->memdev.used_pages))); + mutex_lock(&kbdev->kctx_list_lock); + list_for_each_entry(element, &kbdev->kctx_list, link) { + /* output the memory usage and cap for each kctx + * opened on this device */ + seq_printf(sfile, " %s-0x%p %10u\n", + "kctx", + element->kctx, + atomic_read(&(element->kctx->used_pages))); + } + mutex_unlock(&kbdev->kctx_list_lock); + } + kbase_dev_list_put(kbdev_list); + return 0; +} + +/* + * File operations related to debugfs entry for gpu_memory + */ +static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file) +{ + return single_open(file, kbasep_gpu_memory_seq_show , NULL); +} + +static const struct file_operations kbasep_gpu_memory_debugfs_fops = { + .open = kbasep_gpu_memory_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Initialize debugfs entry for gpu_memory + */ +void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev) +{ + debugfs_create_file("gpu_memory", S_IRUGO, + kbdev->mali_debugfs_directory, NULL, + &kbasep_gpu_memory_debugfs_fops); + return; +} + +#else +/* + * Stub functions for when debugfs is disabled + */ +void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev) +{ + return; +} +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h new file mode 100644 index 00000000000000..7045693eb91096 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h @@ -0,0 +1,37 @@ +/* + * + * (C) COPYRIGHT 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_gpu_memory_debugfs.h + * Header file for gpu_memory entry in debugfs + * + */ + +#ifndef _KBASE_GPU_MEMORY_DEBUGFS_H +#define _KBASE_GPU_MEMORY_DEBUGFS_H + +#include +#include + +/** + * @brief Initialize gpu_memory debugfs entry + */ +void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev); + +#endif /*_KBASE_GPU_MEMORY_DEBUGFS_H*/ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c new file mode 100644 index 00000000000000..fe6d4581a872bb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c @@ -0,0 +1,513 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Base kernel property query APIs + */ + +#include +#include +#include +#include +#include +#include "mali_kbase_ioctl.h" +#include + +/** + * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield. + * @value: The value from which to extract bits. + * @offset: The first bit to extract (0 being the LSB). + * @size: The number of bits to extract. + * + * Context: @offset + @size <= 32. + * + * Return: Bits [@offset, @offset + @size) from @value. + */ +/* from mali_cdsb.h */ +#define KBASE_UBFX32(value, offset, size) \ + (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1)) + +int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props) +{ + kbase_gpu_clk_speed_func get_gpu_speed_mhz; + u32 gpu_speed_mhz; + int rc = 1; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != kbase_props); + + /* Current GPU speed is requested from the system integrator via the GPU_SPEED_FUNC function. + * If that function fails, or the function is not provided by the system integrator, we report the maximum + * GPU speed as specified by GPU_FREQ_KHZ_MAX. + */ + get_gpu_speed_mhz = (kbase_gpu_clk_speed_func) GPU_SPEED_FUNC; + if (get_gpu_speed_mhz != NULL) { + rc = get_gpu_speed_mhz(&gpu_speed_mhz); +#ifdef CONFIG_MALI_DEBUG + /* Issue a warning message when the reported GPU speed falls outside the min/max range */ + if (rc == 0) { + u32 gpu_speed_khz = gpu_speed_mhz * 1000; + + if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min || + gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max) + dev_warn(kctx->kbdev->dev, "GPU Speed is outside of min/max range (got %lu Khz, min %lu Khz, max %lu Khz)\n", + (unsigned long)gpu_speed_khz, + (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min, + (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max); + } +#endif /* CONFIG_MALI_DEBUG */ + } + if (kctx->kbdev->clock) { + gpu_speed_mhz = clk_get_rate(kctx->kbdev->clock) / 1000000; + rc = 0; + } + if (rc != 0) + gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000; + + kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz; + + memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props)); + + /* Before API 8.2 they expect L3 cache info here, which was always 0 */ + if (kctx->api_version < KBASE_API_VERSION(8, 2)) + kbase_props->props.raw_props.suspend_size = 0; + + return 0; +} + +static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props) +{ + struct mali_base_gpu_coherent_group *current_group; + u64 group_present; + u64 group_mask; + u64 first_set, first_set_prev; + u32 num_groups = 0; + + KBASE_DEBUG_ASSERT(NULL != props); + + props->coherency_info.coherency = props->raw_props.mem_features; + props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present); + + if (props->coherency_info.coherency & GROUPS_L2_COHERENT) { + /* Group is l2 coherent */ + group_present = props->raw_props.l2_present; + } else { 
+ /* Group is l1 coherent */ + group_present = props->raw_props.shader_present; + } + + /* + * The coherent group mask can be computed from the l2 present + * register. + * + * For the coherent group n: + * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1) + * where first_set is group_present with only its nth set-bit kept + * (i.e. the position from where a new group starts). + * + * For instance if the groups are l2 coherent and l2_present=0x0..01111: + * The first mask is: + * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1) + * = (0x0..010 - 1) & ~(0x0..01 - 1) + * = 0x0..00f + * The second mask is: + * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1) + * = (0x0..100 - 1) & ~(0x0..010 - 1) + * = 0x0..0f0 + * And so on until all the bits from group_present have been cleared + * (i.e. there is no group left). + */ + + current_group = props->coherency_info.group; + first_set = group_present & ~(group_present - 1); + + while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) { + group_present -= first_set; /* Clear the current group bit */ + first_set_prev = first_set; + + first_set = group_present & ~(group_present - 1); + group_mask = (first_set - 1) & ~(first_set_prev - 1); + + /* Populate the coherent_group structure for each group */ + current_group->core_mask = group_mask & props->raw_props.shader_present; + current_group->num_cores = hweight64(current_group->core_mask); + + num_groups++; + current_group++; + } + + if (group_present != 0) + pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS); + + props->coherency_info.num_groups = num_groups; +} + +/** + * kbase_gpuprops_get_props - Get the GPU configuration + * @gpu_props: The &base_gpu_props structure + * @kbdev: The &struct kbase_device structure for the device + * + * Fill the &base_gpu_props structure with values from the GPU configuration + * registers. 
Only the raw properties are filled in this function + */ +static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev) +{ + struct kbase_gpuprops_regdump regdump; + int i; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + KBASE_DEBUG_ASSERT(NULL != gpu_props); + + /* Dump relevant registers */ + kbase_backend_gpuprops_get(kbdev, ®dump); + + gpu_props->raw_props.gpu_id = regdump.gpu_id; + gpu_props->raw_props.tiler_features = regdump.tiler_features; + gpu_props->raw_props.mem_features = regdump.mem_features; + gpu_props->raw_props.mmu_features = regdump.mmu_features; + gpu_props->raw_props.l2_features = regdump.l2_features; + gpu_props->raw_props.suspend_size = regdump.suspend_size; + + gpu_props->raw_props.as_present = regdump.as_present; + gpu_props->raw_props.js_present = regdump.js_present; + gpu_props->raw_props.shader_present = + ((u64) regdump.shader_present_hi << 32) + + regdump.shader_present_lo; + gpu_props->raw_props.tiler_present = + ((u64) regdump.tiler_present_hi << 32) + + regdump.tiler_present_lo; + gpu_props->raw_props.l2_present = + ((u64) regdump.l2_present_hi << 32) + + regdump.l2_present_lo; + gpu_props->raw_props.stack_present = + ((u64) regdump.stack_present_hi << 32) + + regdump.stack_present_lo; + + for (i = 0; i < GPU_MAX_JOB_SLOTS; i++) + gpu_props->raw_props.js_features[i] = regdump.js_features[i]; + + for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) + gpu_props->raw_props.texture_features[i] = regdump.texture_features[i]; + + gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size; + gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads; + gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size; + gpu_props->raw_props.thread_features = regdump.thread_features; +} + +void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props) +{ + gpu_props->core_props.version_status = + KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4); + gpu_props->core_props.minor_revision = + KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8); + gpu_props->core_props.major_revision = + KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4); + gpu_props->core_props.product_id = + KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16); +} + +/** + * kbase_gpuprops_calculate_props - Calculate the derived properties + * @gpu_props: The &base_gpu_props structure + * @kbdev: The &struct kbase_device structure for the device + * + * Fill the &base_gpu_props structure with values derived from the GPU + * configuration registers + */ +static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev) +{ + int i; + + /* Populate the base_gpu_props structure */ + kbase_gpuprops_update_core_props_gpu_id(gpu_props); + gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2; + gpu_props->core_props.gpu_available_memory_size = totalram_pages() << PAGE_SHIFT; + + for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++) + gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i]; + + gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8); + gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8); + + /* Field with number of l2 slices is added to MEM_FEATURES register + * since t76x. Below code assumes that for older GPU reserved bits will + * be read as zero. 
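+	 * For example, a MEM_FEATURES value with 0x1 in bits [11:8] yields
+	 * num_l2_slices = 1 + 1 = 2, while the zero read back on older GPUs
+	 * gives the expected single slice.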
*/ + gpu_props->l2_props.num_l2_slices = + KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1; + + gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6); + gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4); + + if (gpu_props->raw_props.thread_max_threads == 0) + gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT; + else + gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads; + + if (gpu_props->raw_props.thread_max_workgroup_size == 0) + gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT; + else + gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size; + + if (gpu_props->raw_props.thread_max_barrier_size == 0) + gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT; + else + gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size; + + gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16); + gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8); + gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6); + gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2); + + /* If values are not specified, then use defaults */ + if (gpu_props->thread_props.max_registers == 0) { + gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT; + gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT; + gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT; + } + /* Initialize the coherent_group structure for each group */ + kbase_gpuprops_construct_coherent_groups(gpu_props); +} + +void kbase_gpuprops_set(struct kbase_device *kbdev) +{ + struct kbase_gpu_props *gpu_props; + struct gpu_raw_gpu_props *raw; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + gpu_props = &kbdev->gpu_props; + raw = &gpu_props->props.raw_props; + + /* Initialize the base_gpu_props structure from the hardware */ + kbase_gpuprops_get_props(&gpu_props->props, kbdev); + + /* Populate the derived properties */ + kbase_gpuprops_calculate_props(&gpu_props->props, kbdev); + + /* Populate kbase-only fields */ + gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8); + gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8); + + gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1); + + gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8); + gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8); + + gpu_props->num_cores = hweight64(raw->shader_present); + gpu_props->num_core_groups = hweight64(raw->l2_present); + gpu_props->num_address_spaces = hweight32(raw->as_present); + gpu_props->num_job_slots = hweight32(raw->js_present); +} + +void kbase_gpuprops_set_features(struct kbase_device *kbdev) +{ + base_gpu_props *gpu_props; + struct kbase_gpuprops_regdump regdump; + + gpu_props = &kbdev->gpu_props.props; + + /* Dump relevant registers */ + kbase_backend_gpuprops_get_features(kbdev, ®dump); + + /* + * Copy the raw value from the register, later this will get turned + * into the selected coherency mode. + * Additionally, add non-coherent mode, as this is always supported. 
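+	 * For example, even if the register reports no coherency support at
+	 * all, coherency_mode still ends up with
+	 * COHERENCY_FEATURE_BIT(COHERENCY_NONE) set.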
+ */ + gpu_props->raw_props.coherency_mode = regdump.coherency_features | + COHERENCY_FEATURE_BIT(COHERENCY_NONE); + + if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_GROUP_SPLIT)) + gpu_props->thread_props.max_thread_group_split = 0; +} + +static struct { + u32 type; + size_t offset; + int size; +} gpu_property_mapping[] = { +#define PROP(name, member) \ + {KBASE_GPUPROP_ ## name, offsetof(struct mali_base_gpu_props, member), \ + sizeof(((struct mali_base_gpu_props *)0)->member)} + PROP(PRODUCT_ID, core_props.product_id), + PROP(VERSION_STATUS, core_props.version_status), + PROP(MINOR_REVISION, core_props.minor_revision), + PROP(MAJOR_REVISION, core_props.major_revision), + PROP(GPU_SPEED_MHZ, core_props.gpu_speed_mhz), + PROP(GPU_FREQ_KHZ_MAX, core_props.gpu_freq_khz_max), + PROP(GPU_FREQ_KHZ_MIN, core_props.gpu_freq_khz_min), + PROP(LOG2_PROGRAM_COUNTER_SIZE, core_props.log2_program_counter_size), + PROP(TEXTURE_FEATURES_0, core_props.texture_features[0]), + PROP(TEXTURE_FEATURES_1, core_props.texture_features[1]), + PROP(TEXTURE_FEATURES_2, core_props.texture_features[2]), + PROP(GPU_AVAILABLE_MEMORY_SIZE, core_props.gpu_available_memory_size), + + PROP(L2_LOG2_LINE_SIZE, l2_props.log2_line_size), + PROP(L2_LOG2_CACHE_SIZE, l2_props.log2_cache_size), + PROP(L2_NUM_L2_SLICES, l2_props.num_l2_slices), + + PROP(TILER_BIN_SIZE_BYTES, tiler_props.bin_size_bytes), + PROP(TILER_MAX_ACTIVE_LEVELS, tiler_props.max_active_levels), + + PROP(MAX_THREADS, thread_props.max_threads), + PROP(MAX_WORKGROUP_SIZE, thread_props.max_workgroup_size), + PROP(MAX_BARRIER_SIZE, thread_props.max_barrier_size), + PROP(MAX_REGISTERS, thread_props.max_registers), + PROP(MAX_TASK_QUEUE, thread_props.max_task_queue), + PROP(MAX_THREAD_GROUP_SPLIT, thread_props.max_thread_group_split), + PROP(IMPL_TECH, thread_props.impl_tech), + + PROP(RAW_SHADER_PRESENT, raw_props.shader_present), + PROP(RAW_TILER_PRESENT, raw_props.tiler_present), + PROP(RAW_L2_PRESENT, raw_props.l2_present), + PROP(RAW_STACK_PRESENT, raw_props.stack_present), + PROP(RAW_L2_FEATURES, raw_props.l2_features), + PROP(RAW_SUSPEND_SIZE, raw_props.suspend_size), + PROP(RAW_MEM_FEATURES, raw_props.mem_features), + PROP(RAW_MMU_FEATURES, raw_props.mmu_features), + PROP(RAW_AS_PRESENT, raw_props.as_present), + PROP(RAW_JS_PRESENT, raw_props.js_present), + PROP(RAW_JS_FEATURES_0, raw_props.js_features[0]), + PROP(RAW_JS_FEATURES_1, raw_props.js_features[1]), + PROP(RAW_JS_FEATURES_2, raw_props.js_features[2]), + PROP(RAW_JS_FEATURES_3, raw_props.js_features[3]), + PROP(RAW_JS_FEATURES_4, raw_props.js_features[4]), + PROP(RAW_JS_FEATURES_5, raw_props.js_features[5]), + PROP(RAW_JS_FEATURES_6, raw_props.js_features[6]), + PROP(RAW_JS_FEATURES_7, raw_props.js_features[7]), + PROP(RAW_JS_FEATURES_8, raw_props.js_features[8]), + PROP(RAW_JS_FEATURES_9, raw_props.js_features[9]), + PROP(RAW_JS_FEATURES_10, raw_props.js_features[10]), + PROP(RAW_JS_FEATURES_11, raw_props.js_features[11]), + PROP(RAW_JS_FEATURES_12, raw_props.js_features[12]), + PROP(RAW_JS_FEATURES_13, raw_props.js_features[13]), + PROP(RAW_JS_FEATURES_14, raw_props.js_features[14]), + PROP(RAW_JS_FEATURES_15, raw_props.js_features[15]), + PROP(RAW_TILER_FEATURES, raw_props.tiler_features), + PROP(RAW_TEXTURE_FEATURES_0, raw_props.texture_features[0]), + PROP(RAW_TEXTURE_FEATURES_1, raw_props.texture_features[1]), + PROP(RAW_TEXTURE_FEATURES_2, raw_props.texture_features[2]), + PROP(RAW_GPU_ID, raw_props.gpu_id), + PROP(RAW_THREAD_MAX_THREADS, raw_props.thread_max_threads), + 
PROP(RAW_THREAD_MAX_WORKGROUP_SIZE, + raw_props.thread_max_workgroup_size), + PROP(RAW_THREAD_MAX_BARRIER_SIZE, raw_props.thread_max_barrier_size), + PROP(RAW_THREAD_FEATURES, raw_props.thread_features), + PROP(RAW_COHERENCY_MODE, raw_props.coherency_mode), + + PROP(COHERENCY_NUM_GROUPS, coherency_info.num_groups), + PROP(COHERENCY_NUM_CORE_GROUPS, coherency_info.num_core_groups), + PROP(COHERENCY_COHERENCY, coherency_info.coherency), + PROP(COHERENCY_GROUP_0, coherency_info.group[0].core_mask), + PROP(COHERENCY_GROUP_1, coherency_info.group[1].core_mask), + PROP(COHERENCY_GROUP_2, coherency_info.group[2].core_mask), + PROP(COHERENCY_GROUP_3, coherency_info.group[3].core_mask), + PROP(COHERENCY_GROUP_4, coherency_info.group[4].core_mask), + PROP(COHERENCY_GROUP_5, coherency_info.group[5].core_mask), + PROP(COHERENCY_GROUP_6, coherency_info.group[6].core_mask), + PROP(COHERENCY_GROUP_7, coherency_info.group[7].core_mask), + PROP(COHERENCY_GROUP_8, coherency_info.group[8].core_mask), + PROP(COHERENCY_GROUP_9, coherency_info.group[9].core_mask), + PROP(COHERENCY_GROUP_10, coherency_info.group[10].core_mask), + PROP(COHERENCY_GROUP_11, coherency_info.group[11].core_mask), + PROP(COHERENCY_GROUP_12, coherency_info.group[12].core_mask), + PROP(COHERENCY_GROUP_13, coherency_info.group[13].core_mask), + PROP(COHERENCY_GROUP_14, coherency_info.group[14].core_mask), + PROP(COHERENCY_GROUP_15, coherency_info.group[15].core_mask), + +#undef PROP +}; + +int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev) +{ + struct kbase_gpu_props *kprops = &kbdev->gpu_props; + struct mali_base_gpu_props *props = &kprops->props; + u32 count = ARRAY_SIZE(gpu_property_mapping); + u32 i; + u32 size = 0; + u8 *p; + + for (i = 0; i < count; i++) { + /* 4 bytes for the ID, and the size of the property */ + size += 4 + gpu_property_mapping[i].size; + } + + kprops->prop_buffer_size = size; + kprops->prop_buffer = kmalloc(size, GFP_KERNEL); + + if (!kprops->prop_buffer) { + kprops->prop_buffer_size = 0; + return -ENOMEM; + } + + p = kprops->prop_buffer; + +#define WRITE_U8(v) (*p++ = (v) & 0xFF) +#define WRITE_U16(v) do { WRITE_U8(v); WRITE_U8((v) >> 8); } while (0) +#define WRITE_U32(v) do { WRITE_U16(v); WRITE_U16((v) >> 16); } while (0) +#define WRITE_U64(v) do { WRITE_U32(v); WRITE_U32((v) >> 32); } while (0) + + for (i = 0; i < count; i++) { + u32 type = gpu_property_mapping[i].type; + u8 type_size; + void *field = ((u8 *)props) + gpu_property_mapping[i].offset; + + switch (gpu_property_mapping[i].size) { + case 1: + type_size = KBASE_GPUPROP_VALUE_SIZE_U8; + break; + case 2: + type_size = KBASE_GPUPROP_VALUE_SIZE_U16; + break; + case 4: + type_size = KBASE_GPUPROP_VALUE_SIZE_U32; + break; + case 8: + type_size = KBASE_GPUPROP_VALUE_SIZE_U64; + break; + default: + dev_err(kbdev->dev, + "Invalid gpu_property_mapping type=%d size=%d", + type, gpu_property_mapping[i].size); + return -EINVAL; + } + + WRITE_U32((type<<2) | type_size); + + switch (type_size) { + case KBASE_GPUPROP_VALUE_SIZE_U8: + WRITE_U8(*((u8 *)field)); + break; + case KBASE_GPUPROP_VALUE_SIZE_U16: + WRITE_U16(*((u16 *)field)); + break; + case KBASE_GPUPROP_VALUE_SIZE_U32: + WRITE_U32(*((u32 *)field)); + break; + case KBASE_GPUPROP_VALUE_SIZE_U64: + WRITE_U64(*((u64 *)field)); + break; + default: /* Cannot be reached */ + WARN_ON(1); + return -EINVAL; + } + } + + return 0; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h new file mode 100644 index 00000000000000..57b3eaf9cd5326 --- 
/dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h @@ -0,0 +1,84 @@ +/* + * + * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_gpuprops.h + * Base kernel property query APIs + */ + +#ifndef _KBASE_GPUPROPS_H_ +#define _KBASE_GPUPROPS_H_ + +#include "mali_kbase_gpuprops_types.h" + +/* Forward definition - see mali_kbase.h */ +struct kbase_device; + +/** + * @brief Set up Kbase GPU properties. + * + * Set up Kbase GPU properties with information from the GPU registers + * + * @param kbdev The struct kbase_device structure for the device + */ +void kbase_gpuprops_set(struct kbase_device *kbdev); + +/** + * kbase_gpuprops_set_features - Set up Kbase GPU properties + * @kbdev: Device pointer + * + * This function sets up GPU properties that are dependent on the hardware + * features bitmask. This function must be preceeded by a call to + * kbase_hw_set_features_mask(). + */ +void kbase_gpuprops_set_features(struct kbase_device *kbdev); + +/** + * @brief Provide GPU properties to userside through UKU call. + * + * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers. + * + * @param kctx The struct kbase_context structure + * @param kbase_props A copy of the struct kbase_uk_gpuprops structure from userspace + * + * @return 0 on success. Any other value indicates failure. + */ +int kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props); + +/** + * kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer + * @kbdev: The kbase device + * + * Fills kbdev->gpu_props->prop_buffer with the GPU properties for user + * space to read. + */ +int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev); + +/** + * kbase_gpuprops_update_core_props_gpu_id - break down gpu id value + * @gpu_props: the &base_gpu_props structure + * + * Break down gpu_id value stored in base_gpu_props::raw_props.gpu_id into + * separate fields (version_status, minor_revision, major_revision, product_id) + * stored in base_gpu_props::core_props. + */ +void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props); + + +#endif /* _KBASE_GPUPROPS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h new file mode 100644 index 00000000000000..10794fc273186b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h @@ -0,0 +1,92 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/** + * @file mali_kbase_gpuprops_types.h + * Base kernel property query APIs + */ + +#ifndef _KBASE_GPUPROPS_TYPES_H_ +#define _KBASE_GPUPROPS_TYPES_H_ + +#include "mali_base_kernel.h" + +#define KBASE_GPU_SPEED_MHZ 123 +#define KBASE_GPU_PC_SIZE_LOG2 24U + +struct kbase_gpuprops_regdump { + u32 gpu_id; + u32 l2_features; + u32 suspend_size; /* API 8.2+ */ + u32 tiler_features; + u32 mem_features; + u32 mmu_features; + u32 as_present; + u32 js_present; + u32 thread_max_threads; + u32 thread_max_workgroup_size; + u32 thread_max_barrier_size; + u32 thread_features; + u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS]; + u32 js_features[GPU_MAX_JOB_SLOTS]; + u32 shader_present_lo; + u32 shader_present_hi; + u32 tiler_present_lo; + u32 tiler_present_hi; + u32 l2_present_lo; + u32 l2_present_hi; + u32 stack_present_lo; + u32 stack_present_hi; + u32 coherency_features; +}; + +struct kbase_gpu_cache_props { + u8 associativity; + u8 external_bus_width; +}; + +struct kbase_gpu_mem_props { + u8 core_group; +}; + +struct kbase_gpu_mmu_props { + u8 va_bits; + u8 pa_bits; +}; + +struct kbase_gpu_props { + /* kernel-only properties */ + u8 num_cores; + u8 num_core_groups; + u8 num_address_spaces; + u8 num_job_slots; + + struct kbase_gpu_cache_props l2_props; + + struct kbase_gpu_mem_props mem; + struct kbase_gpu_mmu_props mmu; + + /* Properties shared with userspace */ + base_gpu_props props; + + u32 prop_buffer_size; + void *prop_buffer; +}; + +#endif /* _KBASE_GPUPROPS_TYPES_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.c b/drivers/gpu/arm/midgard/mali_kbase_hw.c new file mode 100644 index 00000000000000..89342e1275ea79 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hw.c @@ -0,0 +1,492 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/* + * Run-time work-arounds helpers + */ + +#include +#include +#include +#include "mali_kbase.h" +#include "mali_kbase_hw.h" + +void kbase_hw_set_features_mask(struct kbase_device *kbdev) +{ + const enum base_hw_feature *features; + u32 gpu_id; + u32 product_id; + + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID; + product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT; + + if (GPU_ID_IS_NEW_FORMAT(product_id)) { + switch (gpu_id & GPU_ID2_PRODUCT_MODEL) { + case GPU_ID2_PRODUCT_TMIX: + features = base_hw_features_tMIx; + break; + case GPU_ID2_PRODUCT_THEX: + features = base_hw_features_tHEx; + break; + case GPU_ID2_PRODUCT_TSIX: + features = base_hw_features_tSIx; + break; + case GPU_ID2_PRODUCT_TDVX: + features = base_hw_features_tDVx; + break; + case GPU_ID2_PRODUCT_TNOX: + features = base_hw_features_tNOx; + break; + case GPU_ID2_PRODUCT_TGOX: + features = base_hw_features_tGOx; + break; + case GPU_ID2_PRODUCT_TKAX: + features = base_hw_features_tKAx; + break; + case GPU_ID2_PRODUCT_TTRX: + features = base_hw_features_tTRx; + break; + case GPU_ID2_PRODUCT_TBOX: + features = base_hw_features_tBOx; + break; + default: + features = base_hw_features_generic; + break; + } + } else { + switch (product_id) { + case GPU_ID_PI_TFRX: + /* FALLTHROUGH */ + case GPU_ID_PI_T86X: + features = base_hw_features_tFxx; + break; + case GPU_ID_PI_T83X: + features = base_hw_features_t83x; + break; + case GPU_ID_PI_T82X: + features = base_hw_features_t82x; + break; + case GPU_ID_PI_T76X: + features = base_hw_features_t76x; + break; + case GPU_ID_PI_T72X: + features = base_hw_features_t72x; + break; + case GPU_ID_PI_T62X: + features = base_hw_features_t62x; + break; + case GPU_ID_PI_T60X: + features = base_hw_features_t60x; + break; + default: + features = base_hw_features_generic; + break; + } + } + + for (; *features != BASE_HW_FEATURE_END; features++) + set_bit(*features, &kbdev->hw_features_mask[0]); +} + +/** + * kbase_hw_get_issues_for_new_id - Get the hardware issues for a new GPU ID + * @kbdev: Device pointer + * + * Return: pointer to an array of hardware issues, terminated by + * BASE_HW_ISSUE_END. + * + * This function can only be used on new-format GPU IDs, i.e. those for which + * GPU_ID_IS_NEW_FORMAT evaluates as true. The GPU ID is read from the @kbdev. + * + * In debugging versions of the driver, unknown versions of a known GPU will + * be treated as the most recent known version not later than the actual + * version. In such circumstances, the GPU ID in @kbdev will also be replaced + * with the most recent known version. + * + * Note: The GPU configuration must have been read by kbase_gpuprops_get_props() + * before calling this function. 
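+ * For example, with the table below an unrecognised tHEx r0p4 would fall
+ * back to the r0p3 issue list (base_hw_issues_tHEx_r0p3) in such debugging
+ * builds, and the stored GPU ID would be rewritten to match.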
+ */ +static const enum base_hw_issue *kbase_hw_get_issues_for_new_id( + struct kbase_device *kbdev) +{ + const enum base_hw_issue *issues = NULL; + + struct base_hw_product { + u32 product_model; + struct { + u32 version; + const enum base_hw_issue *issues; + } map[7]; + }; + + static const struct base_hw_product base_hw_products[] = { + {GPU_ID2_PRODUCT_TMIX, + {{GPU_ID2_VERSION_MAKE(0, 0, 1), + base_hw_issues_tMIx_r0p0_05dev0}, + {GPU_ID2_VERSION_MAKE(0, 0, 2), base_hw_issues_tMIx_r0p0}, + {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tMIx_r0p1}, + {U32_MAX /* sentinel value */, NULL} } }, + + {GPU_ID2_PRODUCT_THEX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tHEx_r0p0}, + {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tHEx_r0p0}, + {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tHEx_r0p1}, + {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tHEx_r0p1}, + {GPU_ID2_VERSION_MAKE(0, 2, 0), base_hw_issues_tHEx_r0p2}, + {GPU_ID2_VERSION_MAKE(0, 3, 0), base_hw_issues_tHEx_r0p3}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TSIX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tSIx_r0p0}, + {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tSIx_r0p0}, + {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tSIx_r0p1}, + {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tSIx_r1p0}, + {GPU_ID2_VERSION_MAKE(1, 1, 0), base_hw_issues_tSIx_r1p1}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TDVX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDVx_r0p0}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TNOX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNOx_r0p0}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TGOX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TKAX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tKAx_r0p0}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TTRX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0}, + {U32_MAX, NULL} } }, + + {GPU_ID2_PRODUCT_TBOX, + {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBOx_r0p0}, + {U32_MAX, NULL} } }, + }; + + u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + const u32 product_model = gpu_id & GPU_ID2_PRODUCT_MODEL; + const struct base_hw_product *product = NULL; + size_t p; + + /* Stop when we reach the end of the products array. */ + for (p = 0; p < ARRAY_SIZE(base_hw_products); ++p) { + if (product_model == base_hw_products[p].product_model) { + product = &base_hw_products[p]; + break; + } + } + + if (product != NULL) { + /* Found a matching product. */ + const u32 version = gpu_id & GPU_ID2_VERSION; +#if !MALI_CUSTOMER_RELEASE + u32 fallback_version = 0; + const enum base_hw_issue *fallback_issues = NULL; +#endif + size_t v; + + /* Stop when we reach the end of the map. */ + for (v = 0; product->map[v].version != U32_MAX; ++v) { + + if (version == product->map[v].version) { + /* Exact match so stop. */ + issues = product->map[v].issues; + break; + } + +#if !MALI_CUSTOMER_RELEASE + /* Check whether this is a candidate for most recent + known version not later than the actual + version. */ + if ((version > product->map[v].version) && + (product->map[v].version >= fallback_version)) { + fallback_version = product->map[v].version; + fallback_issues = product->map[v].issues; + } +#endif + } + +#if !MALI_CUSTOMER_RELEASE + if ((issues == NULL) && (fallback_issues != NULL)) { + /* Fall back to the issue set of the most recent known + version not later than the actual version. 
*/ + issues = fallback_issues; + + dev_info(kbdev->dev, + "r%dp%d status %d is unknown; treating as r%dp%d status %d", + (gpu_id & GPU_ID2_VERSION_MAJOR) >> + GPU_ID2_VERSION_MAJOR_SHIFT, + (gpu_id & GPU_ID2_VERSION_MINOR) >> + GPU_ID2_VERSION_MINOR_SHIFT, + (gpu_id & GPU_ID2_VERSION_STATUS) >> + GPU_ID2_VERSION_STATUS_SHIFT, + (fallback_version & GPU_ID2_VERSION_MAJOR) >> + GPU_ID2_VERSION_MAJOR_SHIFT, + (fallback_version & GPU_ID2_VERSION_MINOR) >> + GPU_ID2_VERSION_MINOR_SHIFT, + (fallback_version & GPU_ID2_VERSION_STATUS) >> + GPU_ID2_VERSION_STATUS_SHIFT); + + gpu_id &= ~GPU_ID2_VERSION; + gpu_id |= fallback_version; + kbdev->gpu_props.props.raw_props.gpu_id = gpu_id; + + kbase_gpuprops_update_core_props_gpu_id( + &kbdev->gpu_props.props); + } +#endif + } + return issues; +} + +int kbase_hw_set_issues_mask(struct kbase_device *kbdev) +{ + const enum base_hw_issue *issues; + u32 gpu_id; + u32 product_id; + u32 impl_tech; + + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; + product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID; + product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT; + impl_tech = kbdev->gpu_props.props.thread_props.impl_tech; + + if (impl_tech != IMPLEMENTATION_MODEL) { + if (GPU_ID_IS_NEW_FORMAT(product_id)) { + issues = kbase_hw_get_issues_for_new_id(kbdev); + if (issues == NULL) { + dev_err(kbdev->dev, + "Unknown GPU ID %x", gpu_id); + return -EINVAL; + } + +#if !MALI_CUSTOMER_RELEASE + /* The GPU ID might have been replaced with the last + known version of the same GPU. */ + gpu_id = kbdev->gpu_props.props.raw_props.gpu_id; +#endif + + } else { + switch (gpu_id) { + case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0): + issues = base_hw_issues_t60x_r0p0_15dev0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC): + issues = base_hw_issues_t60x_r0p0_eac; + break; + case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0): + issues = base_hw_issues_t60x_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0): + issues = base_hw_issues_t62x_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1): + issues = base_hw_issues_t62x_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0): + issues = base_hw_issues_t62x_r1p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1): + issues = base_hw_issues_t76x_r0p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1): + issues = base_hw_issues_t76x_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9): + issues = base_hw_issues_t76x_r0p1_50rel0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1): + issues = base_hw_issues_t76x_r0p2; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1): + issues = base_hw_issues_t76x_r0p3; + break; + case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0): + issues = base_hw_issues_t76x_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1): + case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2): + issues = base_hw_issues_t72x_r0p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0): + issues = base_hw_issues_t72x_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0): + issues = base_hw_issues_t72x_r1p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2): + issues = base_hw_issues_tFRx_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0): + issues = base_hw_issues_tFRx_r0p2; + break; + case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8): + issues = base_hw_issues_tFRx_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0): + issues = base_hw_issues_tFRx_r2p0; + 
break; + case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0): + issues = base_hw_issues_t86x_r0p2; + break; + case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8): + issues = base_hw_issues_t86x_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0): + issues = base_hw_issues_t86x_r2p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0): + issues = base_hw_issues_t83x_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8): + issues = base_hw_issues_t83x_r1p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0): + issues = base_hw_issues_t82x_r0p0; + break; + case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0): + issues = base_hw_issues_t82x_r0p1; + break; + case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0): + case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8): + issues = base_hw_issues_t82x_r1p0; + break; + default: + dev_err(kbdev->dev, + "Unknown GPU ID %x", gpu_id); + return -EINVAL; + } + } + } else { + /* Software model */ + if (GPU_ID_IS_NEW_FORMAT(product_id)) { + switch (gpu_id & GPU_ID2_PRODUCT_MODEL) { + case GPU_ID2_PRODUCT_TMIX: + issues = base_hw_issues_model_tMIx; + break; + case GPU_ID2_PRODUCT_THEX: + issues = base_hw_issues_model_tHEx; + break; + case GPU_ID2_PRODUCT_TSIX: + issues = base_hw_issues_model_tSIx; + break; + case GPU_ID2_PRODUCT_TDVX: + issues = base_hw_issues_model_tDVx; + break; + case GPU_ID2_PRODUCT_TNOX: + issues = base_hw_issues_model_tNOx; + break; + case GPU_ID2_PRODUCT_TGOX: + issues = base_hw_issues_model_tGOx; + break; + case GPU_ID2_PRODUCT_TKAX: + issues = base_hw_issues_model_tKAx; + break; + case GPU_ID2_PRODUCT_TTRX: + issues = base_hw_issues_model_tTRx; + break; + case GPU_ID2_PRODUCT_TBOX: + issues = base_hw_issues_model_tBOx; + break; + default: + dev_err(kbdev->dev, + "Unknown GPU ID %x", gpu_id); + return -EINVAL; + } + } else { + switch (product_id) { + case GPU_ID_PI_T60X: + issues = base_hw_issues_model_t60x; + break; + case GPU_ID_PI_T62X: + issues = base_hw_issues_model_t62x; + break; + case GPU_ID_PI_T72X: + issues = base_hw_issues_model_t72x; + break; + case GPU_ID_PI_T76X: + issues = base_hw_issues_model_t76x; + break; + case GPU_ID_PI_TFRX: + issues = base_hw_issues_model_tFRx; + break; + case GPU_ID_PI_T86X: + issues = base_hw_issues_model_t86x; + break; + case GPU_ID_PI_T83X: + issues = base_hw_issues_model_t83x; + break; + case GPU_ID_PI_T82X: + issues = base_hw_issues_model_t82x; + break; + default: + dev_err(kbdev->dev, "Unknown GPU ID %x", + gpu_id); + return -EINVAL; + } + } + } + + if (GPU_ID_IS_NEW_FORMAT(product_id)) { + dev_info(kbdev->dev, + "GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d", + (gpu_id & GPU_ID2_PRODUCT_MAJOR) >> + GPU_ID2_PRODUCT_MAJOR_SHIFT, + (gpu_id & GPU_ID2_ARCH_MAJOR) >> + GPU_ID2_ARCH_MAJOR_SHIFT, + (gpu_id & GPU_ID2_ARCH_MINOR) >> + GPU_ID2_ARCH_MINOR_SHIFT, + (gpu_id & GPU_ID2_ARCH_REV) >> + GPU_ID2_ARCH_REV_SHIFT, + (gpu_id & GPU_ID2_VERSION_MAJOR) >> + GPU_ID2_VERSION_MAJOR_SHIFT, + (gpu_id & GPU_ID2_VERSION_MINOR) >> + GPU_ID2_VERSION_MINOR_SHIFT, + (gpu_id & GPU_ID2_VERSION_STATUS) >> + GPU_ID2_VERSION_STATUS_SHIFT); + } else { + dev_info(kbdev->dev, + "GPU identified as 0x%04x r%dp%d status %d", + (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >> + GPU_ID_VERSION_PRODUCT_ID_SHIFT, + (gpu_id & GPU_ID_VERSION_MAJOR) >> + GPU_ID_VERSION_MAJOR_SHIFT, + (gpu_id & GPU_ID_VERSION_MINOR) >> + GPU_ID_VERSION_MINOR_SHIFT, + (gpu_id & GPU_ID_VERSION_STATUS) >> + GPU_ID_VERSION_STATUS_SHIFT); + } + + for (; *issues != BASE_HW_ISSUE_END; issues++) + 
set_bit(*issues, &kbdev->hw_issues_mask[0]); + + return 0; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.h b/drivers/gpu/arm/midgard/mali_kbase_hw.h new file mode 100644 index 00000000000000..754250ce968d94 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hw.h @@ -0,0 +1,65 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file + * Run-time work-arounds helpers + */ + +#ifndef _KBASE_HW_H_ +#define _KBASE_HW_H_ + +#include "mali_kbase_defs.h" + +/** + * @brief Tell whether a work-around should be enabled + */ +#define kbase_hw_has_issue(kbdev, issue)\ + test_bit(issue, &(kbdev)->hw_issues_mask[0]) + +/** + * @brief Tell whether a feature is supported + */ +#define kbase_hw_has_feature(kbdev, feature)\ + test_bit(feature, &(kbdev)->hw_features_mask[0]) + +/** + * kbase_hw_set_issues_mask - Set the hardware issues mask based on the GPU ID + * @kbdev: Device pointer + * + * Return: 0 if the GPU ID was recognized, otherwise -EINVAL. + * + * The GPU ID is read from the @kbdev. + * + * In debugging versions of the driver, unknown versions of a known GPU with a + * new-format ID will be treated as the most recent known version not later + * than the actual version. In such circumstances, the GPU ID in @kbdev will + * also be replaced with the most recent known version. + * + * Note: The GPU configuration must have been read by + * kbase_gpuprops_get_props() before calling this function. + */ +int kbase_hw_set_issues_mask(struct kbase_device *kbdev); + +/** + * @brief Set the features mask depending on the GPU ID + */ +void kbase_hw_set_features_mask(struct kbase_device *kbdev); + +#endif /* _KBASE_HW_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h new file mode 100644 index 00000000000000..b09be99e6b4e10 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h @@ -0,0 +1,54 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * HW access backend common APIs + */ + +#ifndef _KBASE_HWACCESS_BACKEND_H_ +#define _KBASE_HWACCESS_BACKEND_H_ + +/** + * kbase_backend_early_init - Perform any backend-specific initialization. + * @kbdev: Device pointer + * + * Return: 0 on success, or an error code on failure. + */ +int kbase_backend_early_init(struct kbase_device *kbdev); + +/** + * kbase_backend_late_init - Perform any backend-specific initialization. + * @kbdev: Device pointer + * + * Return: 0 on success, or an error code on failure. 
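+ * A typical (illustrative) bring-up order is kbase_backend_early_init()
+ * followed by kbase_backend_late_init(), with teardown in the reverse order
+ * via kbase_backend_late_term() and then kbase_backend_early_term().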
+ */ +int kbase_backend_late_init(struct kbase_device *kbdev); + +/** + * kbase_backend_early_term - Perform any backend-specific termination. + * @kbdev: Device pointer + */ +void kbase_backend_early_term(struct kbase_device *kbdev); + +/** + * kbase_backend_late_term - Perform any backend-specific termination. + * @kbdev: Device pointer + */ +void kbase_backend_late_term(struct kbase_device *kbdev); + +#endif /* _KBASE_HWACCESS_BACKEND_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h new file mode 100644 index 00000000000000..0acf297192fdcf --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h @@ -0,0 +1,36 @@ +/* + * + * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/** + * @file mali_kbase_hwaccess_gpu_defs.h + * HW access common definitions + */ + +#ifndef _KBASE_HWACCESS_DEFS_H_ +#define _KBASE_HWACCESS_DEFS_H_ + +#include + +/* The hwaccess_lock (a spinlock) must be held when accessing this structure */ +struct kbase_hwaccess_data { + struct kbase_context *active_kctx; + + struct kbase_backend_data backend; +}; + +#endif /* _KBASE_HWACCESS_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h new file mode 100644 index 00000000000000..cf8a8131c22ed2 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h @@ -0,0 +1,47 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+
+
+/**
+ * Base kernel property query backend APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPUPROPS_H_
+#define _KBASE_HWACCESS_GPUPROPS_H_
+
+/**
+ * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from
+ *                                GPU
+ * @kbdev:   Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ */
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read
+ *                                       from GPU
+ * @kbdev:   Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads GPU properties that are dependent on the hardware
+ * features bitmask.
+ */
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+
+#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
new file mode 100644
index 00000000000000..5de2b7535bb47c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/*
+ * HW Access instrumentation common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_INSTR_H_
+#define _KBASE_HWACCESS_INSTR_H_
+
+#include
+
+/**
+ * kbase_instr_hwcnt_enable_internal - Enable HW counters collection
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @setup: HW counter setup parameters
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				struct kbase_uk_hwcnt_setup *setup);
+
+/**
+ * kbase_instr_hwcnt_disable_internal - Disable HW counters collection
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for an ongoing dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU
+ * @kctx: Kbase context
+ *
+ * Caller must either wait for kbase_instr_hwcnt_dump_complete() to return
+ * true, or call kbase_instr_hwcnt_wait_for_dump().
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has
+ *                                     completed.
+ * @kctx: Kbase context
+ *
+ * Context: will sleep, waiting for dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has
+ *                                   completed
+ * @kctx: Kbase context
+ * @success: Set to true if successful
+ *
+ * Context: does not sleep.
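+ * A typical polling flow is kbase_instr_hwcnt_request_dump() followed by
+ * repeated calls to this function until it reports completion; callers that
+ * prefer to block can use kbase_instr_hwcnt_wait_for_dump() instead.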
+ * + * Return: true if the dump is complete + */ +bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, + bool * const success); + +/** + * kbase_instr_hwcnt_clear() - Clear HW counters + * @kctx: Kbase context + * + * Context: might sleep, waiting for reset to complete + * + * Return: 0 on success + */ +int kbase_instr_hwcnt_clear(struct kbase_context *kctx); + +/** + * kbase_instr_backend_init() - Initialise the instrumentation backend + * @kbdev: Kbase device + * + * This function should be called during driver initialization. + * + * Return: 0 on success + */ +int kbase_instr_backend_init(struct kbase_device *kbdev); + +/** + * kbase_instr_backend_init() - Terminate the instrumentation backend + * @kbdev: Kbase device + * + * This function should be called during driver termination. + */ +void kbase_instr_backend_term(struct kbase_device *kbdev); + +#endif /* _KBASE_HWACCESS_INSTR_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h new file mode 100644 index 00000000000000..750fda2cd81dec --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h @@ -0,0 +1,381 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * HW access job manager common APIs + */ + +#ifndef _KBASE_HWACCESS_JM_H_ +#define _KBASE_HWACCESS_JM_H_ + +/** + * kbase_backend_run_atom() - Run an atom on the GPU + * @kbdev: Device pointer + * @atom: Atom to run + * + * Caller must hold the HW access lock + */ +void kbase_backend_run_atom(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +/** + * kbase_backend_slot_update - Update state based on slot ringbuffers + * + * @kbdev: Device pointer + * + * Inspect the jobs in the slot ringbuffers and update state. + * + * This will cause jobs to be submitted to hardware if they are unblocked + */ +void kbase_backend_slot_update(struct kbase_device *kbdev); + +/** + * kbase_backend_find_and_release_free_address_space() - Release a free AS + * @kbdev: Device pointer + * @kctx: Context pointer + * + * This function can evict an idle context from the runpool, freeing up the + * address space it was using. + * + * The address space is marked as in use. The caller must either assign a + * context using kbase_gpu_use_ctx(), or release it using + * kbase_ctx_sched_release() + * + * Return: Number of free address space, or KBASEP_AS_NR_INVALID if none + * available + */ +int kbase_backend_find_and_release_free_address_space( + struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the + * provided address space. + * @kbdev: Device pointer + * @kctx: Context pointer. May be NULL + * @as_nr: Free address space to use + * + * kbase_gpu_next_job() will pull atoms from the active context. + * + * Return: true if successful, false if ASID not assigned. + */ +bool kbase_backend_use_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx, + int as_nr); + +/** + * kbase_backend_use_ctx_sched() - Activate a context. 
+ * @kbdev: Device pointer + * @kctx: Context pointer + * + * kbase_gpu_next_job() will pull atoms from the active context. + * + * The context must already be scheduled and assigned to an address space. If + * the context is not scheduled, then kbase_gpu_use_ctx() should be used + * instead. + * + * Caller must hold hwaccess_lock + * + * Return: true if context is now active, false otherwise (ie if context does + * not have an address space assigned) + */ +bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, + struct kbase_context *kctx); + +/** + * kbase_backend_release_ctx_irq - Release a context from the GPU. This will + * de-assign the assigned address space. + * @kbdev: Device pointer + * @kctx: Context pointer + * + * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock + */ +void kbase_backend_release_ctx_irq(struct kbase_device *kbdev, + struct kbase_context *kctx); + +/** + * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will + * de-assign the assigned address space. + * @kbdev: Device pointer + * @kctx: Context pointer + * + * Caller must hold kbase_device->mmu_hw_mutex + * + * This function must perform any operations that could not be performed in IRQ + * context by kbase_backend_release_ctx_irq(). + */ +void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev, + struct kbase_context *kctx); + +/** + * kbase_backend_cacheclean - Perform a cache clean if the given atom requires + * one + * @kbdev: Device pointer + * @katom: Pointer to the failed atom + * + * On some GPUs, the GPU cache must be cleaned following a failed atom. This + * function performs a clean if it is required by @katom. + */ +void kbase_backend_cacheclean(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + + +/** + * kbase_backend_complete_wq() - Perform backend-specific actions required on + * completing an atom. + * @kbdev: Device pointer + * @katom: Pointer to the atom to complete + * + * This function should only be called from kbase_jd_done_worker() or + * js_return_worker(). + * + * Return: true if atom has completed, false if atom should be re-submitted + */ +void kbase_backend_complete_wq(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +/** + * kbase_backend_complete_wq_post_sched - Perform backend-specific actions + * required on completing an atom, after + * any scheduling has taken place. + * @kbdev: Device pointer + * @core_req: Core requirements of atom + * @affinity: Affinity of atom + * @coreref_state: Coreref state of atom + * + * This function should only be called from kbase_jd_done_worker() or + * js_return_worker(). + */ +void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev, + base_jd_core_req core_req, u64 affinity, + enum kbase_atom_coreref_state coreref_state); + +/** + * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU + * and remove any others from the ringbuffers. 
+ * @kbdev: Device pointer + * @end_timestamp: Timestamp of reset + */ +void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp); + +/** + * kbase_backend_inspect_head() - Return the atom currently at the head of slot + * @js + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return : Atom currently at the head of slot @js, or NULL + */ +struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev, + int js); + +/** + * kbase_backend_inspect_tail - Return the atom currently at the tail of slot + * @js + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return : Atom currently at the head of slot @js, or NULL + */ +struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev, + int js); + +/** + * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a + * slot. + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return : Number of atoms currently on slot + */ +int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js); + +/** + * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot + * that are currently on the GPU. + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return : Number of atoms currently on slot @js that are currently on the GPU. + */ +int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js); + +/** + * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs + * has changed. + * @kbdev: Device pointer + * + * Perform any required backend-specific actions (eg starting/stopping + * scheduling timers). + */ +void kbase_backend_ctx_count_changed(struct kbase_device *kbdev); + +/** + * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed. + * @kbdev: Device pointer + * + * Perform any required backend-specific actions (eg updating timeouts of + * currently running atoms). + */ +void kbase_backend_timeouts_changed(struct kbase_device *kbdev); + +/** + * kbase_backend_slot_free() - Return the number of jobs that can be currently + * submitted to slot @js. + * @kbdev: Device pointer + * @js: Job slot to inspect + * + * Return : Number of jobs that can be submitted. + */ +int kbase_backend_slot_free(struct kbase_device *kbdev, int js); + +/** + * kbase_job_check_enter_disjoint - potentially leave disjoint state + * @kbdev: kbase device + * @target_katom: atom which is finishing + * + * Work out whether to leave disjoint state when finishing an atom that was + * originated by kbase_job_check_enter_disjoint(). + */ +void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, + struct kbase_jd_atom *target_katom); + +/** + * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently + * running from a context + * @kctx: Context pointer + * + * This is used in response to a page fault to remove all jobs from the faulting + * context from the hardware. + */ +void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx); + +/** + * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and + * to be descheduled. + * @kctx: Context pointer + * + * This should be called following kbase_js_zap_context(), to ensure the context + * can be safely destroyed. 
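+ *
+ * Sketch of the intended teardown ordering (the surrounding context-destroy
+ * path is assumed and not defined in this header):
+ *
+ *   kbase_js_zap_context(kctx);
+ *   kbase_jm_wait_for_zero_jobs(kctx);
+ *   ... the context can now be safely destroyed ...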
+ */ +void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx); + +/** + * kbase_backend_get_current_flush_id - Return the current flush ID + * + * @kbdev: Device pointer + * + * Return: the current flush ID to be recorded for each job chain + */ +u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev); + +#if KBASE_GPU_RESET_EN +/** + * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU. + * @kbdev: Device pointer + * + * This function just soft-stops all the slots to ensure that as many jobs as + * possible are saved. + * + * Return: a boolean which should be interpreted as follows: + * - true - Prepared for reset, kbase_reset_gpu should be called. + * - false - Another thread is performing a reset, kbase_reset_gpu should + * not be called. + */ +bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev); + +/** + * kbase_reset_gpu - Reset the GPU + * @kbdev: Device pointer + * + * This function should be called after kbase_prepare_to_reset_gpu if it returns + * true. It should never be called without a corresponding call to + * kbase_prepare_to_reset_gpu. + * + * After this function is called (or not called if kbase_prepare_to_reset_gpu + * returned false), the caller should wait for kbdev->reset_waitq to be + * signalled to know when the reset has completed. + */ +void kbase_reset_gpu(struct kbase_device *kbdev); + +/** + * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU. + * @kbdev: Device pointer + * + * This function just soft-stops all the slots to ensure that as many jobs as + * possible are saved. + * + * Return: a boolean which should be interpreted as follows: + * - true - Prepared for reset, kbase_reset_gpu should be called. + * - false - Another thread is performing a reset, kbase_reset_gpu should + * not be called. + */ +bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev); + +/** + * kbase_reset_gpu_locked - Reset the GPU + * @kbdev: Device pointer + * + * This function should be called after kbase_prepare_to_reset_gpu if it + * returns true. It should never be called without a corresponding call to + * kbase_prepare_to_reset_gpu. + * + * After this function is called (or not called if kbase_prepare_to_reset_gpu + * returned false), the caller should wait for kbdev->reset_waitq to be + * signalled to know when the reset has completed. + */ +void kbase_reset_gpu_locked(struct kbase_device *kbdev); + +/** + * kbase_reset_gpu_silent - Reset the GPU silently + * @kbdev: Device pointer + * + * Reset the GPU without trying to cancel jobs and don't emit messages into + * the kernel log while doing the reset. + * + * This function should be used in cases where we are doing a controlled reset + * of the GPU as part of normal processing (e.g. exiting protected mode) where + * the driver will have ensured the scheduler has been idled and all other + * users of the GPU (e.g. instrumentation) have been suspended. + */ +void kbase_reset_gpu_silent(struct kbase_device *kbdev); + +/** + * kbase_reset_gpu_active - Reports if the GPU is being reset + * @kbdev: Device pointer + * + * Return: True if the GPU is in the process of being reset. 
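+ *
+ * Taken together with the functions above, one possible reset sequence is
+ * (sketch only; the wait condition is illustrative, the documented contract is
+ * simply that kbdev->reset_waitq is signalled once the reset completes):
+ *
+ *   if (kbase_prepare_to_reset_gpu(kbdev))
+ *           kbase_reset_gpu(kbdev);
+ *   wait_event(kbdev->reset_waitq, !kbase_reset_gpu_active(kbdev));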
+ */ +bool kbase_reset_gpu_active(struct kbase_device *kbdev); +#endif + +/** + * kbase_job_slot_hardstop - Hard-stop the specified job slot + * @kctx: The kbase context that contains the job(s) that should + * be hard-stopped + * @js: The job slot to hard-stop + * @target_katom: The job that should be hard-stopped (or NULL for all + * jobs from the context) + * Context: + * The job slot lock must be held when calling this function. + */ +void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, + struct kbase_jd_atom *target_katom); + +extern struct protected_mode_ops kbase_native_protected_ops; + +#endif /* _KBASE_HWACCESS_JM_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h new file mode 100644 index 00000000000000..71c7d495c40ab9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h @@ -0,0 +1,209 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/** + * @file mali_kbase_hwaccess_pm.h + * HW access power manager common APIs + */ + +#ifndef _KBASE_HWACCESS_PM_H_ +#define _KBASE_HWACCESS_PM_H_ + +#include +#include + +#include + +/* Forward definition - see mali_kbase.h */ +struct kbase_device; + +/* Functions common to all HW access backends */ + +/** + * Initialize the power management framework. + * + * Must be called before any other power management function + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + * + * @return 0 if the power management framework was successfully + * initialized. + */ +int kbase_hwaccess_pm_init(struct kbase_device *kbdev); + +/** + * Terminate the power management framework. + * + * No power management functions may be called after this (except + * @ref kbase_pm_init) + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_term(struct kbase_device *kbdev); + +/** + * kbase_hwaccess_pm_powerup - Power up the GPU. + * @kbdev: The kbase device structure for the device (must be a valid pointer) + * @flags: Flags to pass on to kbase_pm_init_hw + * + * Power up GPU after all modules have been initialized and interrupt handlers + * installed. + * + * Return: 0 if powerup was successful. + */ +int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev, + unsigned int flags); + +/** + * Halt the power management framework. + * + * Should ensure that no new interrupts are generated, but allow any currently + * running interrupt handlers to complete successfully. The GPU is forced off by + * the time this function returns, regardless of whether or not the active power + * policy asks for the GPU to be powered off. 
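+ *
+ * Sketch of the expected call ordering over the driver's lifetime, based on
+ * the descriptions in this header (other driver init/term steps omitted):
+ *
+ *   kbase_hwaccess_pm_init(kbdev);
+ *   kbase_hwaccess_pm_powerup(kbdev, flags);
+ *   ...
+ *   kbase_hwaccess_pm_halt(kbdev);
+ *   kbase_hwaccess_pm_term(kbdev);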
+ * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_halt(struct kbase_device *kbdev); + +/** + * Perform any backend-specific actions to suspend the GPU + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev); + +/** + * Perform any backend-specific actions to resume the GPU from a suspend + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_resume(struct kbase_device *kbdev); + +/** + * Perform any required actions for activating the GPU. Called when the first + * context goes active. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev); + +/** + * Perform any required actions for idling the GPU. Called when the last + * context goes idle. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + */ +void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev); + + +/** + * Set the debug core mask. + * + * This determines which cores the power manager is allowed to use. + * + * @param kbdev The kbase device structure for the device (must be a + * valid pointer) + * @param new_core_mask_js0 The core mask to use for job slot 0 + * @param new_core_mask_js0 The core mask to use for job slot 1 + * @param new_core_mask_js0 The core mask to use for job slot 2 + */ +void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, + u64 new_core_mask_js0, u64 new_core_mask_js1, + u64 new_core_mask_js2); + + +/** + * Get the current policy. + * + * Returns the policy that is currently active. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + * + * @return The current policy + */ +const struct kbase_pm_ca_policy +*kbase_pm_ca_get_policy(struct kbase_device *kbdev); + +/** + * Change the policy to the one specified. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + * @param policy The policy to change to (valid pointer returned from + * @ref kbase_pm_ca_list_policies) + */ +void kbase_pm_ca_set_policy(struct kbase_device *kbdev, + const struct kbase_pm_ca_policy *policy); + +/** + * Retrieve a static list of the available policies. + * + * @param[out] policies An array pointer to take the list of policies. This may + * be NULL. The contents of this array must not be + * modified. + * + * @return The number of policies + */ +int +kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies); + + +/** + * Get the current policy. + * + * Returns the policy that is currently active. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + * + * @return The current policy + */ +const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev); + +/** + * Change the policy to the one specified. + * + * @param kbdev The kbase device structure for the device (must be a valid + * pointer) + * @param policy The policy to change to (valid pointer returned from + * @ref kbase_pm_list_policies) + */ +void kbase_pm_set_policy(struct kbase_device *kbdev, + const struct kbase_pm_policy *policy); + +/** + * Retrieve a static list of the available policies. + * + * @param[out] policies An array pointer to take the list of policies. This may + * be NULL. 
The contents of this array must not be + * modified. + * + * @return The number of policies + */ +int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies); + +#endif /* _KBASE_HWACCESS_PM_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h new file mode 100644 index 00000000000000..e3475354545bcb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h @@ -0,0 +1,58 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/** + * + */ + +#ifndef _KBASE_BACKEND_TIME_H_ +#define _KBASE_BACKEND_TIME_H_ + +/** + * kbase_backend_get_gpu_time() - Get current GPU time + * @kbdev: Device pointer + * @cycle_counter: Pointer to u64 to store cycle counter in + * @system_time: Pointer to u64 to store system time in + * @ts: Pointer to struct timespec to store current monotonic + * time in + */ +void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter, + u64 *system_time, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 *ts); +#else + struct timespec *ts); +#endif + +/** + * kbase_wait_write_flush() - Wait for GPU write flush + * @kctx: Context pointer + * + * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush + * its write buffer. + * + * If GPU resets occur then the counters are reset to zero, the delay may not be + * as expected. + * + * This function is only in use for BASE_HW_ISSUE_6367 + */ +#ifndef CONFIG_MALI_NO_MALI +void kbase_wait_write_flush(struct kbase_context *kctx); +#endif + +#endif /* _KBASE_BACKEND_TIME_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h new file mode 100644 index 00000000000000..cf7bf1b35dc59e --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h @@ -0,0 +1,66 @@ +/* + * + * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_HWCNT_READER_H_ +#define _KBASE_HWCNT_READER_H_ + +/* The ids of ioctl commands. 
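+ * These are typically issued on the dedicated reader file descriptor returned
+ * by the hwcnt reader setup ioctl (KBASE_IOCTL_HWCNT_READER_SETUP in
+ * mali_kbase_ioctl.h), not on the main device fd.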
*/ +#define KBASE_HWCNT_READER 0xBE +#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, u32) +#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32) +#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, u32) +#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, u32) +#define KBASE_HWCNT_READER_GET_BUFFER _IOR(KBASE_HWCNT_READER, 0x20,\ + struct kbase_hwcnt_reader_metadata) +#define KBASE_HWCNT_READER_PUT_BUFFER _IOW(KBASE_HWCNT_READER, 0x21,\ + struct kbase_hwcnt_reader_metadata) +#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, u32) +#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, u32) +#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, u32) +#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32) + +/** + * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata + * @timestamp: time when sample was collected + * @event_id: id of an event that triggered sample collection + * @buffer_idx: position in sampling area where sample buffer was stored + */ +struct kbase_hwcnt_reader_metadata { + u64 timestamp; + u32 event_id; + u32 buffer_idx; +}; + +/** + * enum base_hwcnt_reader_event - hwcnt dumping events + * @BASE_HWCNT_READER_EVENT_MANUAL: manual request for dump + * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump + * @BASE_HWCNT_READER_EVENT_PREJOB: prejob dump request + * @BASE_HWCNT_READER_EVENT_POSTJOB: postjob dump request + * @BASE_HWCNT_READER_EVENT_COUNT: number of supported events + */ +enum base_hwcnt_reader_event { + BASE_HWCNT_READER_EVENT_MANUAL, + BASE_HWCNT_READER_EVENT_PERIODIC, + BASE_HWCNT_READER_EVENT_PREJOB, + BASE_HWCNT_READER_EVENT_POSTJOB, + + BASE_HWCNT_READER_EVENT_COUNT +}; + +#endif /* _KBASE_HWCNT_READER_H_ */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_ioctl.h b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h new file mode 100644 index 00000000000000..c953a5ea32d980 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h @@ -0,0 +1,665 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KBASE_IOCTL_H_ +#define _KBASE_IOCTL_H_ + +#ifdef __cpluscplus +extern "C" { +#endif + +#include + +#define KBASE_IOCTL_TYPE 0x80 + +#define BASE_UK_VERSION_MAJOR 11 +#define BASE_UK_VERSION_MINOR 0 + +#ifdef ANDROID +/* Android's definition of ioctl is incorrect, specifying the type argument as + * 'int'. This creates a warning when using _IOWR (as the top bit is set). Work + * round this by redefining _IOC to include a case to 'int'. 
+ */ +#undef _IOC +#define _IOC(dir, type, nr, size) \ + ((int)(((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT))) +#endif + +/** + * struct kbase_ioctl_version_check - Check version compatibility with kernel + * + * @major: Major version number + * @minor: Minor version number + */ +struct kbase_ioctl_version_check { + __u16 major; + __u16 minor; +}; + +#define KBASE_IOCTL_VERSION_CHECK \ + _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check) + +/** + * struct kbase_ioctl_set_flags - Set kernel context creation flags + * + * @create_flags: Flags - see base_context_create_flags + */ +struct kbase_ioctl_set_flags { + __u32 create_flags; +}; + +#define KBASE_IOCTL_SET_FLAGS \ + _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags) + +/** + * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel + * + * @addr: Memory address of an array of struct base_jd_atom_v2 + * @nr_atoms: Number of entries in the array + * @stride: sizeof(struct base_jd_atom_v2) + */ +struct kbase_ioctl_job_submit { + __u64 addr; + __u32 nr_atoms; + __u32 stride; +}; + +#define KBASE_IOCTL_JOB_SUBMIT \ + _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit) + +/** + * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel + * + * @buffer: Pointer to the buffer to store properties into + * @size: Size of the buffer + * @flags: Flags - must be zero for now + * + * The ioctl will return the number of bytes stored into @buffer or an error + * on failure (e.g. @size is too small). If @size is specified as 0 then no + * data will be written but the return value will be the number of bytes needed + * for all the properties. + * + * @flags may be used in the future to request a different format for the + * buffer. With @flags == 0 the following format is used. + * + * The buffer will be filled with pairs of values, a u32 key identifying the + * property followed by the value. The size of the value is identified using + * the bottom bits of the key. The value then immediately followed the key and + * is tightly packed (there is no padding). All keys and values are + * little-endian. + * + * 00 = u8 + * 01 = u16 + * 10 = u32 + * 11 = u64 + */ +struct kbase_ioctl_get_gpuprops { + __u64 buffer; + __u32 size; + __u32 flags; +}; + +#define KBASE_IOCTL_GET_GPUPROPS \ + _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops) + +#define KBASE_IOCTL_POST_TERM \ + _IO(KBASE_IOCTL_TYPE, 4) + +/** + * union kbase_ioctl_mem_alloc - Allocate memory on the GPU + * + * @va_pages: The number of pages of virtual address space to reserve + * @commit_pages: The number of physical pages to allocate + * @extent: The number of extra pages to allocate on each GPU fault which grows + * the region + * @flags: Flags + * @gpu_va: The GPU virtual address which is allocated + * + * @in: Input parameters + * @out: Output parameters + */ +union kbase_ioctl_mem_alloc { + struct { + __u64 va_pages; + __u64 commit_pages; + __u64 extent; + __u64 flags; + } in; + struct { + __u64 flags; + __u64 gpu_va; + } out; +}; + +#define KBASE_IOCTL_MEM_ALLOC \ + _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc) + +/** + * struct kbase_ioctl_mem_query - Query properties of a GPU memory region + * @gpu_addr: A GPU address contained within the region + * @query: The type of query + * @value: The result of the query + * + * Use a %KBASE_MEM_QUERY_xxx flag as input for @query. 
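+ *
+ * Minimal user-space sketch (kbase_fd, gpu_va and flags are hypothetical
+ * caller variables; no error handling shown):
+ *
+ *   union kbase_ioctl_mem_query q = { 0 };
+ *
+ *   q.in.gpu_addr = gpu_va;
+ *   q.in.query = KBASE_MEM_QUERY_FLAGS;
+ *   if (ioctl(kbase_fd, KBASE_IOCTL_MEM_QUERY, &q) == 0)
+ *           flags = q.out.value;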
+ * + * @in: Input parameters + * @out: Output parameters + */ +union kbase_ioctl_mem_query { + struct { + __u64 gpu_addr; + __u64 query; + } in; + struct { + __u64 value; + } out; +}; + +#define KBASE_IOCTL_MEM_QUERY \ + _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query) + +#define KBASE_MEM_QUERY_COMMIT_SIZE 1 +#define KBASE_MEM_QUERY_VA_SIZE 2 +#define KBASE_MEM_QUERY_FLAGS 3 + +/** + * struct kbase_ioctl_mem_free - Free a memory region + * @gpu_addr: Handle to the region to free + */ +struct kbase_ioctl_mem_free { + __u64 gpu_addr; +}; + +#define KBASE_IOCTL_MEM_FREE \ + _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free) + +/** + * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader + * @buffer_count: requested number of dumping buffers + * @jm_bm: counters selection bitmask (JM) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + * + * A fd is returned from the ioctl if successful, or a negative value on error + */ +struct kbase_ioctl_hwcnt_reader_setup { + __u32 buffer_count; + __u32 jm_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_READER_SETUP \ + _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup) + +/** + * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection + * @dump_buffer: GPU address to write counters to + * @jm_bm: counters selection bitmask (JM) + * @shader_bm: counters selection bitmask (Shader) + * @tiler_bm: counters selection bitmask (Tiler) + * @mmu_l2_bm: counters selection bitmask (MMU_L2) + */ +struct kbase_ioctl_hwcnt_enable { + __u64 dump_buffer; + __u32 jm_bm; + __u32 shader_bm; + __u32 tiler_bm; + __u32 mmu_l2_bm; +}; + +#define KBASE_IOCTL_HWCNT_ENABLE \ + _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable) + +#define KBASE_IOCTL_HWCNT_DUMP \ + _IO(KBASE_IOCTL_TYPE, 10) + +#define KBASE_IOCTL_HWCNT_CLEAR \ + _IO(KBASE_IOCTL_TYPE, 11) + +/** + * struct kbase_ioctl_disjoint_query - Query the disjoint counter + * @counter: A counter of disjoint events in the kernel + */ +struct kbase_ioctl_disjoint_query { + __u32 counter; +}; + +#define KBASE_IOCTL_DISJOINT_QUERY \ + _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query) + +/** + * struct kbase_ioctl_get_ddk_version - Query the kernel version + * @version_buffer: Buffer to receive the kernel version string + * @size: Size of the buffer + * @padding: Padding + * + * The ioctl will return the number of bytes written into version_buffer + * (which includes a NULL byte) or a negative error code + */ +struct kbase_ioctl_get_ddk_version { + __u64 version_buffer; + __u32 size; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_DDK_VERSION \ + _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version) + +/** + * struct kbase_ioctl_mem_jit_init - Initialise the JIT memory allocator + * + * @va_pages: Number of VA pages to reserve for JIT + * + * Note that depending on the VA size of the application and GPU, the value + * specified in @va_pages may be ignored. 
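+ *
+ * Usage sketch (kbase_fd is a hypothetical device fd; 512 is an arbitrary
+ * illustrative value; no error handling shown):
+ *
+ *   struct kbase_ioctl_mem_jit_init jit = { .va_pages = 512 };
+ *
+ *   ioctl(kbase_fd, KBASE_IOCTL_MEM_JIT_INIT, &jit);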
+ */ +struct kbase_ioctl_mem_jit_init { + __u64 va_pages; +}; + +#define KBASE_IOCTL_MEM_JIT_INIT \ + _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init) + +/** + * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory + * + * @handle: GPU memory handle (GPU VA) + * @user_addr: The address where it is mapped in user space + * @size: The number of bytes to synchronise + * @type: The direction to synchronise: 0 is sync to memory (clean), + * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants. + * @padding: Padding to round up to a multiple of 8 bytes, must be zero + */ +struct kbase_ioctl_mem_sync { + __u64 handle; + __u64 user_addr; + __u64 size; + __u8 type; + __u8 padding[7]; +}; + +#define KBASE_IOCTL_MEM_SYNC \ + _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync) + +/** + * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer + * + * @gpu_addr: The GPU address of the memory region + * @cpu_addr: The CPU address to locate + * @size: A size in bytes to validate is contained within the region + * @offset: The offset from the start of the memory region to @cpu_addr + * + * @in: Input parameters + * @out: Output parameters + */ +union kbase_ioctl_mem_find_cpu_offset { + struct { + __u64 gpu_addr; + __u64 cpu_addr; + __u64 size; + } in; + struct { + __u64 offset; + } out; +}; + +#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \ + _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset) + +/** + * struct kbase_ioctl_get_context_id - Get the kernel context ID + * + * @id: The kernel context ID + */ +struct kbase_ioctl_get_context_id { + __u32 id; +}; + +#define KBASE_IOCTL_GET_CONTEXT_ID \ + _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id) + +/** + * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd + * + * @flags: Flags + * + * The ioctl returns a file descriptor when successful + */ +struct kbase_ioctl_tlstream_acquire { + __u32 flags; +}; + +#define KBASE_IOCTL_TLSTREAM_ACQUIRE \ + _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire) + +#define KBASE_IOCTL_TLSTREAM_FLUSH \ + _IO(KBASE_IOCTL_TYPE, 19) + +/** + * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region + * + * @gpu_addr: The memory region to modify + * @pages: The number of physical pages that should be present + * + * The ioctl may return on the following error codes or 0 for success: + * -ENOMEM: Out of memory + * -EINVAL: Invalid arguments + */ +struct kbase_ioctl_mem_commit { + __u64 gpu_addr; + __u64 pages; +}; + +#define KBASE_IOCTL_MEM_COMMIT \ + _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit) + +/** + * union kbase_ioctl_mem_alias - Create an alias of memory regions + * @flags: Flags, see BASE_MEM_xxx + * @stride: Bytes between start of each memory region + * @nents: The number of regions to pack together into the alias + * @aliasing_info: Pointer to an array of struct base_mem_aliasing_info + * @gpu_va: Address of the new alias + * @va_pages: Size of the new alias + * + * @in: Input parameters + * @out: Output parameters + */ +union kbase_ioctl_mem_alias { + struct { + __u64 flags; + __u64 stride; + __u64 nents; + __u64 aliasing_info; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_ALIAS \ + _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias) + +/** + * union kbase_ioctl_mem_import - Import memory for use by the GPU + * @flags: Flags, see BASE_MEM_xxx + * @phandle: Handle to the external memory + * @type: Type of external 
memory, see base_mem_import_type + * @padding: Amount of extra VA pages to append to the imported buffer + * @gpu_va: Address of the new alias + * @va_pages: Size of the new alias + * + * @in: Input parameters + * @out: Output parameters + */ +union kbase_ioctl_mem_import { + struct { + __u64 flags; + __u64 phandle; + __u32 type; + __u32 padding; + } in; + struct { + __u64 flags; + __u64 gpu_va; + __u64 va_pages; + } out; +}; + +#define KBASE_IOCTL_MEM_IMPORT \ + _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import) + +/** + * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region + * @gpu_va: The GPU region to modify + * @flags: The new flags to set + * @mask: Mask of the flags to modify + */ +struct kbase_ioctl_mem_flags_change { + __u64 gpu_va; + __u64 flags; + __u64 mask; +}; + +#define KBASE_IOCTL_MEM_FLAGS_CHANGE \ + _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change) + +/** + * struct kbase_ioctl_stream_create - Create a synchronisation stream + * @name: A name to identify this stream. Must be NULL-terminated. + * + * Note that this is also called a "timeline", but is named stream to avoid + * confusion with other uses of the word. + * + * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes. + * + * The ioctl returns a file descriptor. + */ +struct kbase_ioctl_stream_create { + char name[32]; +}; + +#define KBASE_IOCTL_STREAM_CREATE \ + _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create) + +/** + * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence + * @fd: The file descriptor to validate + */ +struct kbase_ioctl_fence_validate { + int fd; +}; + +#define KBASE_IOCTL_FENCE_VALIDATE \ + _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate) + +/** + * struct kbase_ioctl_get_profiling_controls - Get the profiling controls + * @count: The size of @buffer in u32 words + * @buffer: The buffer to receive the profiling controls + * @padding: Padding + */ +struct kbase_ioctl_get_profiling_controls { + __u64 buffer; + __u32 count; + __u32 padding; +}; + +#define KBASE_IOCTL_GET_PROFILING_CONTROLS \ + _IOW(KBASE_IOCTL_TYPE, 26, struct kbase_ioctl_get_profiling_controls) + +/** + * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel + * @buffer: Pointer to the information + * @len: Length + * @padding: Padding + * + * The data provided is accessible through a debugfs file + */ +struct kbase_ioctl_mem_profile_add { + __u64 buffer; + __u32 len; + __u32 padding; +}; + +#define KBASE_IOCTL_MEM_PROFILE_ADD \ + _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add) + +/** + * struct kbase_ioctl_soft_event_update - Update the status of a soft-event + * @event: GPU address of the event which has been updated + * @new_status: The new status to set + * @flags: Flags for future expansion + */ +struct kbase_ioctl_soft_event_update { + __u64 event; + __u32 new_status; + __u32 flags; +}; + +#define KBASE_IOCTL_SOFT_EVENT_UPDATE \ + _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update) + +/* IOCTLs 29-32 are reserved */ + +/*************** + * test ioctls * + ***************/ +#if MALI_UNIT_TEST +/* These ioctls are purely for test purposes and are not used in the production + * driver, they therefore may change without notice + */ + +#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1) + +/** + * struct kbase_ioctl_tlstream_test - Start a timeline stream test + * + * @tpw_count: number of trace point writers in each context + * @msg_delay: time delay between tracepoints 
from one writer in milliseconds + * @msg_count: number of trace points written by one writer + * @aux_msg: if non-zero aux messages will be included + */ +struct kbase_ioctl_tlstream_test { + __u32 tpw_count; + __u32 msg_delay; + __u32 msg_count; + __u32 aux_msg; +}; + +#define KBASE_IOCTL_TLSTREAM_TEST \ + _IOW(KBASE_IOCTL_TEST_TYPE, 1, struct kbase_ioctl_tlstream_test) + +/** + * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + */ +struct kbase_ioctl_tlstream_stats { + __u32 bytes_collected; + __u32 bytes_generated; +}; + +#define KBASE_IOCTL_TLSTREAM_STATS \ + _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats) + +#endif + +/********************************** + * Definitions for GPU properties * + **********************************/ +#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0) +#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1) +#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2) +#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3) + +#define KBASE_GPUPROP_PRODUCT_ID 1 +#define KBASE_GPUPROP_VERSION_STATUS 2 +#define KBASE_GPUPROP_MINOR_REVISION 3 +#define KBASE_GPUPROP_MAJOR_REVISION 4 +#define KBASE_GPUPROP_GPU_SPEED_MHZ 5 +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6 +#define KBASE_GPUPROP_GPU_FREQ_KHZ_MIN 7 +#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8 +#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9 +#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10 +#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11 +#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12 + +#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13 +#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14 +#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15 + +#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16 +#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17 + +#define KBASE_GPUPROP_MAX_THREADS 18 +#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19 +#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20 +#define KBASE_GPUPROP_MAX_REGISTERS 21 +#define KBASE_GPUPROP_MAX_TASK_QUEUE 22 +#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23 +#define KBASE_GPUPROP_IMPL_TECH 24 + +#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25 +#define KBASE_GPUPROP_RAW_TILER_PRESENT 26 +#define KBASE_GPUPROP_RAW_L2_PRESENT 27 +#define KBASE_GPUPROP_RAW_STACK_PRESENT 28 +#define KBASE_GPUPROP_RAW_L2_FEATURES 29 +#define KBASE_GPUPROP_RAW_SUSPEND_SIZE 30 +#define KBASE_GPUPROP_RAW_MEM_FEATURES 31 +#define KBASE_GPUPROP_RAW_MMU_FEATURES 32 +#define KBASE_GPUPROP_RAW_AS_PRESENT 33 +#define KBASE_GPUPROP_RAW_JS_PRESENT 34 +#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35 +#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36 +#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37 +#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38 +#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39 +#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40 +#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41 +#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42 +#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43 +#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44 +#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45 +#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46 +#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47 +#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48 +#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49 +#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50 +#define KBASE_GPUPROP_RAW_TILER_FEATURES 51 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53 +#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54 +#define KBASE_GPUPROP_RAW_GPU_ID 55 +#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56 +#define 
KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57 +#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58 +#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59 +#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60 + +#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61 +#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62 +#define KBASE_GPUPROP_COHERENCY_COHERENCY 63 +#define KBASE_GPUPROP_COHERENCY_GROUP_0 64 +#define KBASE_GPUPROP_COHERENCY_GROUP_1 65 +#define KBASE_GPUPROP_COHERENCY_GROUP_2 66 +#define KBASE_GPUPROP_COHERENCY_GROUP_3 67 +#define KBASE_GPUPROP_COHERENCY_GROUP_4 68 +#define KBASE_GPUPROP_COHERENCY_GROUP_5 69 +#define KBASE_GPUPROP_COHERENCY_GROUP_6 70 +#define KBASE_GPUPROP_COHERENCY_GROUP_7 71 +#define KBASE_GPUPROP_COHERENCY_GROUP_8 72 +#define KBASE_GPUPROP_COHERENCY_GROUP_9 73 +#define KBASE_GPUPROP_COHERENCY_GROUP_10 74 +#define KBASE_GPUPROP_COHERENCY_GROUP_11 75 +#define KBASE_GPUPROP_COHERENCY_GROUP_12 76 +#define KBASE_GPUPROP_COHERENCY_GROUP_13 77 +#define KBASE_GPUPROP_COHERENCY_GROUP_14 78 +#define KBASE_GPUPROP_COHERENCY_GROUP_15 79 + +#ifdef __cpluscplus +} +#endif + +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c new file mode 100644 index 00000000000000..4172a282b00498 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c @@ -0,0 +1,1630 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include +#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ +#ifdef CONFIG_COMPAT +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mali_kbase_dma_fence.h" + +#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) +/* random32 was renamed to prandom_u32 in 3.8 */ +#define prandom_u32 random32 +#endif + +/* Return whether katom will run on the GPU or not. Currently only soft jobs and + * dependency-only atoms do not run on the GPU */ +#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) || \ + ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == \ + BASE_JD_REQ_DEP))) +/* + * This is the kernel side of the API. Only entry points are: + * - kbase_jd_submit(): Called from userspace to submit a single bag + * - kbase_jd_done(): Called from interrupt context to track the + * completion of a job. + * Callouts: + * - to the job manager (enqueue a job) + * - to the event subsystem (signals the completion/failure of bag/job-chains). + */ + +static void __user * +get_compat_pointer(struct kbase_context *kctx, const u64 p) +{ +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + return compat_ptr(p); +#endif + return u64_to_user_ptr(p); +} + +/* Runs an atom, either by handing to the JS or by immediately running it in the case of soft-jobs + * + * Returns whether the JS needs a reschedule. 
+ * + * Note that the caller must also check the atom status and + * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock + */ +static int jd_run_atom(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + + KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED); + + if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) { + /* Dependency only atom */ + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + return 0; + } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) { + /* Soft-job */ + if (katom->will_fail_event_code) { + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + return 0; + } + if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) + == BASE_JD_REQ_SOFT_REPLAY) { + if (!kbase_replay_process(katom)) + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + } else if (kbase_process_soft_job(katom) == 0) { + kbase_finish_soft_job(katom); + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + } + return 0; + } + + katom->status = KBASE_JD_ATOM_STATE_IN_JS; + /* Queue an action about whether we should try scheduling a context */ + return kbasep_js_add_job(kctx, katom); +} + +#if defined(CONFIG_MALI_DMA_FENCE) +void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom) +{ + struct kbase_device *kbdev; + + KBASE_DEBUG_ASSERT(katom); + kbdev = katom->kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev); + + /* Check whether the atom's other dependencies were already met. If + * katom is a GPU atom then the job scheduler may be able to represent + * the dependencies, hence we may attempt to submit it before they are + * met. Other atoms must have had both dependencies resolved. + */ + if (IS_GPU_ATOM(katom) || + (!kbase_jd_katom_dep_atom(&katom->dep[0]) && + !kbase_jd_katom_dep_atom(&katom->dep[1]))) { + /* katom dep complete, attempt to run it */ + bool resched = false; + + resched = jd_run_atom(katom); + + if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) { + /* The atom has already finished */ + resched |= jd_done_nolock(katom, NULL); + } + + if (resched) + kbase_js_sched_all(kbdev); + } +} +#endif + +void kbase_jd_free_external_resources(struct kbase_jd_atom *katom) +{ +#ifdef CONFIG_MALI_DMA_FENCE + /* Flush dma-fence workqueue to ensure that any callbacks that may have + * been queued are done before continuing. + * Any successfully completed atom would have had all it's callbacks + * completed before the atom was run, so only flush for failed atoms. + */ + if (katom->event_code != BASE_JD_EVENT_DONE) + flush_workqueue(katom->kctx->dma_fence.wq); +#endif /* CONFIG_MALI_DMA_FENCE */ +} + +static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom) +{ + KBASE_DEBUG_ASSERT(katom); + KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES); + +#ifdef CONFIG_MALI_DMA_FENCE + kbase_dma_fence_signal(katom); +#endif /* CONFIG_MALI_DMA_FENCE */ + + kbase_gpu_vm_lock(katom->kctx); + /* only roll back if extres is non-NULL */ + if (katom->extres) { + u32 res_no; + + res_no = katom->nr_extres; + while (res_no-- > 0) { + struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc; + struct kbase_va_region *reg; + + reg = kbase_region_tracker_find_region_base_address( + katom->kctx, + katom->extres[res_no].gpu_address); + kbase_unmap_external_resource(katom->kctx, reg, alloc); + } + kfree(katom->extres); + katom->extres = NULL; + } + kbase_gpu_vm_unlock(katom->kctx); +} + +/* + * Set up external resources needed by this job. + * + * jctx.lock must be held when this is called. 
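+ *
+ * Each entry of the user-supplied list packs the region's GPU address together
+ * with an access flag in a single field, along the lines of (sketch; names
+ * other than the flag are illustrative):
+ *
+ *   res.ext_resource = region_gpu_addr | BASE_EXT_RES_ACCESS_EXCLUSIVE;
+ *
+ * which is why the address is recovered below by masking the flag back out.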
+ */ + +static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom) +{ + int err_ret_val = -EINVAL; + u32 res_no; +#ifdef CONFIG_MALI_DMA_FENCE + struct kbase_dma_fence_resv_info info = { + .dma_fence_resv_count = 0, + }; +#ifdef CONFIG_SYNC + /* + * When both dma-buf fence and Android native sync is enabled, we + * disable dma-buf fence for contexts that are using Android native + * fences. + */ + const bool implicit_sync = !kbase_ctx_flag(katom->kctx, + KCTX_NO_IMPLICIT_SYNC); +#else /* CONFIG_SYNC */ + const bool implicit_sync = true; +#endif /* CONFIG_SYNC */ +#endif /* CONFIG_MALI_DMA_FENCE */ + struct base_external_resource *input_extres; + + KBASE_DEBUG_ASSERT(katom); + KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES); + + /* no resources encoded, early out */ + if (!katom->nr_extres) + return -EINVAL; + + katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL); + if (NULL == katom->extres) { + err_ret_val = -ENOMEM; + goto early_err_out; + } + + /* copy user buffer to the end of our real buffer. + * Make sure the struct sizes haven't changed in a way + * we don't support */ + BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres)); + input_extres = (struct base_external_resource *) + (((unsigned char *)katom->extres) + + (sizeof(*katom->extres) - sizeof(*input_extres)) * + katom->nr_extres); + + if (copy_from_user(input_extres, + get_compat_pointer(katom->kctx, user_atom->extres_list), + sizeof(*input_extres) * katom->nr_extres) != 0) { + err_ret_val = -EINVAL; + goto early_err_out; + } + +#ifdef CONFIG_MALI_DMA_FENCE + if (implicit_sync) { + info.resv_objs = kmalloc_array(katom->nr_extres, + sizeof(struct dma_resv *), + GFP_KERNEL); + if (!info.resv_objs) { + err_ret_val = -ENOMEM; + goto early_err_out; + } + + info.dma_fence_excl_bitmap = + kcalloc(BITS_TO_LONGS(katom->nr_extres), + sizeof(unsigned long), GFP_KERNEL); + if (!info.dma_fence_excl_bitmap) { + err_ret_val = -ENOMEM; + goto early_err_out; + } + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + /* Take the processes mmap lock */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_read(¤t->mm->mmap_lock); +#else + down_read(¤t->mm->mmap_sem); +#endif + + /* need to keep the GPU VM locked while we set up UMM buffers */ + kbase_gpu_vm_lock(katom->kctx); + for (res_no = 0; res_no < katom->nr_extres; res_no++) { + struct base_external_resource *res; + struct kbase_va_region *reg; + struct kbase_mem_phy_alloc *alloc; + bool exclusive; + + res = &input_extres[res_no]; + exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE) + ? true : false; + reg = kbase_region_tracker_find_region_enclosing_address( + katom->kctx, + res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE); + /* did we find a matching region object? 
*/ + if (NULL == reg || (reg->flags & KBASE_REG_FREE)) { + /* roll back */ + goto failed_loop; + } + + if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) && + (reg->flags & KBASE_REG_SECURE)) { + katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED; + } + + alloc = kbase_map_external_resource(katom->kctx, reg, + current->mm); + if (!alloc) { + err_ret_val = -EINVAL; + goto failed_loop; + } + +#ifdef CONFIG_MALI_DMA_FENCE + if (implicit_sync && + reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) { + struct dma_resv *resv; + + resv = reg->gpu_alloc->imported.umm.dma_buf->resv; + if (resv) + kbase_dma_fence_add_reservation(resv, &info, + exclusive); + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + /* finish with updating out array with the data we found */ + /* NOTE: It is important that this is the last thing we do (or + * at least not before the first write) as we overwrite elements + * as we loop and could be overwriting ourself, so no writes + * until the last read for an element. + * */ + katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */ + katom->extres[res_no].alloc = alloc; + } + /* successfully parsed the extres array */ + /* drop the vm lock now */ + kbase_gpu_vm_unlock(katom->kctx); + + /* Release the processes mmap lock */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_read(¤t->mm->mmap_lock); +#else + up_read(¤t->mm->mmap_sem); +#endif + +#ifdef CONFIG_MALI_DMA_FENCE + if (implicit_sync) { + if (info.dma_fence_resv_count) { + int ret; + + ret = kbase_dma_fence_wait(katom, &info); + if (ret < 0) + goto failed_dma_fence_setup; + } + + kfree(info.resv_objs); + kfree(info.dma_fence_excl_bitmap); + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + /* all done OK */ + return 0; + +/* error handling section */ + +#ifdef CONFIG_MALI_DMA_FENCE +failed_dma_fence_setup: + /* Lock the processes mmap lock */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_read(¤t->mm->mmap_lock); +#else + down_read(¤t->mm->mmap_sem); +#endif + + /* lock before we unmap */ + kbase_gpu_vm_lock(katom->kctx); +#endif + + failed_loop: + /* undo the loop work */ + while (res_no-- > 0) { + struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc; + + kbase_unmap_external_resource(katom->kctx, NULL, alloc); + } + kbase_gpu_vm_unlock(katom->kctx); + + /* Release the processes mmap lock */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_read(¤t->mm->mmap_lock); +#else + up_read(¤t->mm->mmap_sem); +#endif + + early_err_out: + kfree(katom->extres); + katom->extres = NULL; +#ifdef CONFIG_MALI_DMA_FENCE + if (implicit_sync) { + kfree(info.resv_objs); + kfree(info.dma_fence_excl_bitmap); + } +#endif + return err_ret_val; +} + +static inline void jd_resolve_dep(struct list_head *out_list, + struct kbase_jd_atom *katom, + u8 d, bool ctx_is_dying) +{ + u8 other_d = !d; + + while (!list_empty(&katom->dep_head[d])) { + struct kbase_jd_atom *dep_atom; + struct kbase_jd_atom *other_dep_atom; + u8 dep_type; + + dep_atom = list_entry(katom->dep_head[d].next, + struct kbase_jd_atom, dep_item[d]); + list_del(katom->dep_head[d].next); + + dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]); + kbase_jd_katom_dep_clear(&dep_atom->dep[d]); + + if (katom->event_code != BASE_JD_EVENT_DONE && + (dep_type != BASE_JD_DEP_TYPE_ORDER)) { +#ifdef CONFIG_MALI_DMA_FENCE + kbase_dma_fence_cancel_callbacks(dep_atom); +#endif + + dep_atom->event_code = katom->event_code; + KBASE_DEBUG_ASSERT(dep_atom->status != + KBASE_JD_ATOM_STATE_UNUSED); + + 
if ((dep_atom->core_req & BASE_JD_REQ_SOFT_REPLAY) + != BASE_JD_REQ_SOFT_REPLAY) { + dep_atom->will_fail_event_code = + dep_atom->event_code; + } else { + dep_atom->status = + KBASE_JD_ATOM_STATE_COMPLETED; + } + } + other_dep_atom = (struct kbase_jd_atom *) + kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]); + + if (!dep_atom->in_jd_list && (!other_dep_atom || + (IS_GPU_ATOM(dep_atom) && !ctx_is_dying && + !dep_atom->will_fail_event_code && + !other_dep_atom->will_fail_event_code))) { + bool dep_satisfied = true; +#ifdef CONFIG_MALI_DMA_FENCE + int dep_count; + + dep_count = kbase_fence_dep_count_read(dep_atom); + if (likely(dep_count == -1)) { + dep_satisfied = true; + } else { + /* + * There are either still active callbacks, or + * all fences for this @dep_atom has signaled, + * but the worker that will queue the atom has + * not yet run. + * + * Wait for the fences to signal and the fence + * worker to run and handle @dep_atom. If + * @dep_atom was completed due to error on + * @katom, then the fence worker will pick up + * the complete status and error code set on + * @dep_atom above. + */ + dep_satisfied = false; + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + if (dep_satisfied) { + dep_atom->in_jd_list = true; + list_add_tail(&dep_atom->jd_item, out_list); + } + } + } +} + +KBASE_EXPORT_TEST_API(jd_resolve_dep); + +#if MALI_CUSTOMER_RELEASE == 0 +static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom) +{ + kbdev->force_replay_count++; + + if (kbdev->force_replay_count >= kbdev->force_replay_limit) { + kbdev->force_replay_count = 0; + katom->event_code = BASE_JD_EVENT_FORCE_REPLAY; + + if (kbdev->force_replay_random) + kbdev->force_replay_limit = + (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1; + + dev_info(kbdev->dev, "force_replay : promoting to error\n"); + } +} + +/** Test to see if atom should be forced to fail. + * + * This function will check if an atom has a replay job as a dependent. If so + * then it will be considered for forced failure. 
*/ +static void jd_check_force_failure(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct kbase_device *kbdev = kctx->kbdev; + int i; + + if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) || + (katom->core_req & BASEP_JD_REQ_EVENT_NEVER)) + return; + + for (i = 1; i < BASE_JD_ATOM_COUNT; i++) { + if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom || + kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) { + struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i]; + + if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == + BASE_JD_REQ_SOFT_REPLAY && + (dep_atom->core_req & kbdev->force_replay_core_req) + == kbdev->force_replay_core_req) { + jd_force_failure(kbdev, katom); + return; + } + } + } +} +#endif + +/** + * is_dep_valid - Validate that a dependency is valid for early dependency + * submission + * @katom: Dependency atom to validate + * + * A dependency is valid if any of the following are true : + * - It does not exist (a non-existent dependency does not block submission) + * - It is in the job scheduler + * - It has completed, does not have a failure event code, and has not been + * marked to fail in the future + * + * Return: true if valid, false otherwise + */ +static bool is_dep_valid(struct kbase_jd_atom *katom) +{ + /* If there's no dependency then this is 'valid' from the perspective of + * early dependency submission */ + if (!katom) + return true; + + /* Dependency must have reached the job scheduler */ + if (katom->status < KBASE_JD_ATOM_STATE_IN_JS) + return false; + + /* If dependency has completed and has failed or will fail then it is + * not valid */ + if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED && + (katom->event_code != BASE_JD_EVENT_DONE || + katom->will_fail_event_code)) + return false; + + return true; +} + +static void jd_try_submitting_deps(struct list_head *out_list, + struct kbase_jd_atom *node) +{ + int i; + + for (i = 0; i < 2; i++) { + struct list_head *pos; + + list_for_each(pos, &node->dep_head[i]) { + struct kbase_jd_atom *dep_atom = list_entry(pos, + struct kbase_jd_atom, dep_item[i]); + + if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) { + /*Check if atom deps look sane*/ + bool dep0_valid = is_dep_valid( + dep_atom->dep[0].atom); + bool dep1_valid = is_dep_valid( + dep_atom->dep[1].atom); + bool dep_satisfied = true; +#ifdef CONFIG_MALI_DMA_FENCE + int dep_count; + + dep_count = kbase_fence_dep_count_read( + dep_atom); + if (likely(dep_count == -1)) { + dep_satisfied = true; + } else { + /* + * There are either still active callbacks, or + * all fences for this @dep_atom has signaled, + * but the worker that will queue the atom has + * not yet run. + * + * Wait for the fences to signal and the fence + * worker to run and handle @dep_atom. If + * @dep_atom was completed due to error on + * @katom, then the fence worker will pick up + * the complete status and error code set on + * @dep_atom above. + */ + dep_satisfied = false; + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + if (dep0_valid && dep1_valid && dep_satisfied) { + dep_atom->in_jd_list = true; + list_add(&dep_atom->jd_item, out_list); + } + } + } + } +} + +/* + * Perform the necessary handling of an atom that has finished running + * on the GPU. + * + * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller + * is responsible for calling kbase_finish_soft_job *before* calling this function. + * + * The caller must hold the kbase_jd_context.lock. 
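+ *
+ * Typical caller pattern (sketch, mirroring kbase_jd_dep_clear_locked()
+ * earlier in this file):
+ *
+ *   bool resched = jd_run_atom(katom);
+ *
+ *   if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED)
+ *           resched |= jd_done_nolock(katom, NULL);
+ *   if (resched)
+ *           kbase_js_sched_all(kbdev);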
+ */ +bool jd_done_nolock(struct kbase_jd_atom *katom, + struct list_head *completed_jobs_ctx) +{ + struct kbase_context *kctx = katom->kctx; + struct kbase_device *kbdev = kctx->kbdev; + struct list_head completed_jobs; + struct list_head runnable_jobs; + bool need_to_try_schedule_context = false; + int i; + + INIT_LIST_HEAD(&completed_jobs); + INIT_LIST_HEAD(&runnable_jobs); + + KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED); + +#if MALI_CUSTOMER_RELEASE == 0 + jd_check_force_failure(katom); +#endif + + /* This is needed in case an atom is failed due to being invalid, this + * can happen *before* the jobs that the atom depends on have completed */ + for (i = 0; i < 2; i++) { + if (kbase_jd_katom_dep_atom(&katom->dep[i])) { + list_del(&katom->dep_item[i]); + kbase_jd_katom_dep_clear(&katom->dep[i]); + } + } + + /* With PRLAM-10817 or PRLAM-10959 the last tile of a fragment job being soft-stopped can fail with + * BASE_JD_EVENT_TILE_RANGE_FAULT. + * + * So here if the fragment job failed with TILE_RANGE_FAULT and it has been soft-stopped, then we promote the + * error code to BASE_JD_EVENT_DONE + */ + + if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) && + katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) { + if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) { + /* Promote the failure to job done */ + katom->event_code = BASE_JD_EVENT_DONE; + katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED); + } + } + + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + list_add_tail(&katom->jd_item, &completed_jobs); + + while (!list_empty(&completed_jobs)) { + katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item); + list_del(completed_jobs.prev); + KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED); + + for (i = 0; i < 2; i++) + jd_resolve_dep(&runnable_jobs, katom, i, + kbase_ctx_flag(kctx, KCTX_DYING)); + + if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) + kbase_jd_post_external_resources(katom); + + while (!list_empty(&runnable_jobs)) { + struct kbase_jd_atom *node; + + node = list_entry(runnable_jobs.next, + struct kbase_jd_atom, jd_item); + list_del(runnable_jobs.next); + node->in_jd_list = false; + + KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED); + + if (node->status != KBASE_JD_ATOM_STATE_COMPLETED && + !kbase_ctx_flag(kctx, KCTX_DYING)) { + need_to_try_schedule_context |= jd_run_atom(node); + } else { + node->event_code = katom->event_code; + + if ((node->core_req & + BASE_JD_REQ_SOFT_JOB_TYPE) == + BASE_JD_REQ_SOFT_REPLAY) { + if (kbase_replay_process(node)) + /* Don't complete this atom */ + continue; + } else if (node->core_req & + BASE_JD_REQ_SOFT_JOB) { + /* If this is a fence wait soft job + * then remove it from the list of sync + * waiters. + */ + if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req) + kbasep_remove_waiting_soft_job(node); + + kbase_finish_soft_job(node); + } + node->status = KBASE_JD_ATOM_STATE_COMPLETED; + } + + if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) { + list_add_tail(&node->jd_item, &completed_jobs); + } else if (node->status == KBASE_JD_ATOM_STATE_IN_JS && + !node->will_fail_event_code) { + /* Node successfully submitted, try submitting + * dependencies as they may now be representable + * in JS */ + jd_try_submitting_deps(&runnable_jobs, node); + } + } + + /* Register a completed job as a disjoint event when the GPU + * is in a disjoint state (ie. 
being reset or replaying jobs). + */ + kbase_disjoint_event_potential(kctx->kbdev); + if (completed_jobs_ctx) + list_add_tail(&katom->jd_item, completed_jobs_ctx); + else + kbase_event_post(kctx, katom); + + /* Decrement and check the TOTAL number of jobs. This includes + * those not tracked by the scheduler: 'not ready to run' and + * 'dependency-only' jobs. */ + if (--kctx->jctx.job_nr == 0) + wake_up(&kctx->jctx.zero_jobs_wait); /* All events are safely queued now, and we can signal any waiter + * that we've got no more jobs (so we can be safely terminated) */ + } + + return need_to_try_schedule_context; +} + +KBASE_EXPORT_TEST_API(jd_done_nolock); + +#ifdef CONFIG_GPU_TRACEPOINTS +enum { + CORE_REQ_DEP_ONLY, + CORE_REQ_SOFT, + CORE_REQ_COMPUTE, + CORE_REQ_FRAGMENT, + CORE_REQ_VERTEX, + CORE_REQ_TILER, + CORE_REQ_FRAGMENT_VERTEX, + CORE_REQ_FRAGMENT_VERTEX_TILER, + CORE_REQ_FRAGMENT_TILER, + CORE_REQ_VERTEX_TILER, + CORE_REQ_UNKNOWN +}; +static const char * const core_req_strings[] = { + "Dependency Only Job", + "Soft Job", + "Compute Shader Job", + "Fragment Shader Job", + "Vertex/Geometry Shader Job", + "Tiler Job", + "Fragment Shader + Vertex/Geometry Shader Job", + "Fragment Shader + Vertex/Geometry Shader Job + Tiler Job", + "Fragment Shader + Tiler Job", + "Vertex/Geometry Shader Job + Tiler Job", + "Unknown Job" +}; +static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req) +{ + if (core_req & BASE_JD_REQ_SOFT_JOB) + return core_req_strings[CORE_REQ_SOFT]; + if (core_req & BASE_JD_REQ_ONLY_COMPUTE) + return core_req_strings[CORE_REQ_COMPUTE]; + switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) { + case BASE_JD_REQ_DEP: + return core_req_strings[CORE_REQ_DEP_ONLY]; + case BASE_JD_REQ_FS: + return core_req_strings[CORE_REQ_FRAGMENT]; + case BASE_JD_REQ_CS: + return core_req_strings[CORE_REQ_VERTEX]; + case BASE_JD_REQ_T: + return core_req_strings[CORE_REQ_TILER]; + case (BASE_JD_REQ_FS | BASE_JD_REQ_CS): + return core_req_strings[CORE_REQ_FRAGMENT_VERTEX]; + case (BASE_JD_REQ_FS | BASE_JD_REQ_T): + return core_req_strings[CORE_REQ_FRAGMENT_TILER]; + case (BASE_JD_REQ_CS | BASE_JD_REQ_T): + return core_req_strings[CORE_REQ_VERTEX_TILER]; + case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T): + return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER]; + } + return core_req_strings[CORE_REQ_UNKNOWN]; +} +#endif + +bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom) +{ + struct kbase_jd_context *jctx = &kctx->jctx; + int queued = 0; + int i; + int sched_prio; + bool ret; + bool will_fail = false; + + /* Update the TOTAL number of jobs. This includes those not tracked by + * the scheduler: 'not ready to run' and 'dependency-only' jobs. 
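+ * The matching decrement is in jd_done_nolock(), which wakes
+ * kctx->jctx.zero_jobs_wait once the count reaches zero so that anything
+ * waiting for this context's jobs to drain can proceed.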
*/ + jctx->job_nr++; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) + katom->start_timestamp.tv64 = 0; +#else + katom->start_timestamp = 0; +#endif + katom->udata = user_atom->udata; + katom->kctx = kctx; + katom->nr_extres = user_atom->nr_extres; + katom->extres = NULL; + katom->device_nr = user_atom->device_nr; + katom->affinity = 0; + katom->jc = user_atom->jc; + katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED; + katom->core_req = user_atom->core_req; + katom->atom_flags = 0; + katom->retry_count = 0; + katom->need_cache_flush_cores_retained = 0; + katom->pre_dep = NULL; + katom->post_dep = NULL; + katom->x_pre_dep = NULL; + katom->x_post_dep = NULL; + katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED; + + /* Implicitly sets katom->protected_state.enter as well. */ + katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK; + + katom->age = kctx->age_count++; + + INIT_LIST_HEAD(&katom->jd_item); +#ifdef CONFIG_MALI_DMA_FENCE + kbase_fence_dep_count_set(katom, -1); +#endif + + /* Don't do anything if there is a mess up with dependencies. + This is done in a separate cycle to check both the dependencies at ones, otherwise + it will be extra complexity to deal with 1st dependency ( just added to the list ) + if only the 2nd one has invalid config. + */ + for (i = 0; i < 2; i++) { + int dep_atom_number = user_atom->pre_dep[i].atom_id; + base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type; + + if (dep_atom_number) { + if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER && + dep_atom_type != BASE_JD_DEP_TYPE_DATA) { + katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT; + katom->status = KBASE_JD_ATOM_STATE_COMPLETED; + + /* Wrong dependency setup. Atom will be sent + * back to user space. Do not record any + * dependencies. */ + KBASE_TLSTREAM_TL_NEW_ATOM( + katom, + kbase_jd_atom_id(kctx, katom)); + KBASE_TLSTREAM_TL_RET_ATOM_CTX( + katom, kctx); + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, + TL_ATOM_STATE_IDLE); + + ret = jd_done_nolock(katom, NULL); + goto out; + } + } + } + + /* Add dependencies */ + for (i = 0; i < 2; i++) { + int dep_atom_number = user_atom->pre_dep[i].atom_id; + base_jd_dep_type dep_atom_type; + struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number]; + + dep_atom_type = user_atom->pre_dep[i].dependency_type; + kbase_jd_katom_dep_clear(&katom->dep[i]); + + if (!dep_atom_number) + continue; + + if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || + dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) { + + if (dep_atom->event_code == BASE_JD_EVENT_DONE) + continue; + /* don't stop this atom if it has an order dependency + * only to the failed one, try to submit it through + * the normal path + */ + if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER && + dep_atom->event_code > BASE_JD_EVENT_ACTIVE) { + continue; + } + + /* Atom has completed, propagate the error code if any */ + katom->event_code = dep_atom->event_code; + katom->status = KBASE_JD_ATOM_STATE_QUEUED; + + /* This atom is going through soft replay or + * will be sent back to user space. Do not record any + * dependencies. 
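+ * The inherited event_code is what ultimately fails this atom: if it is not
+ * also queued behind a still-active dependency it is completed immediately
+ * after this loop, otherwise will_fail_event_code records the failure for
+ * later.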
*/ + KBASE_TLSTREAM_TL_NEW_ATOM( + katom, + kbase_jd_atom_id(kctx, katom)); + KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx); + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, + TL_ATOM_STATE_IDLE); + + if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) + == BASE_JD_REQ_SOFT_REPLAY) { + if (kbase_replay_process(katom)) { + ret = false; + goto out; + } + } + will_fail = true; + + } else { + /* Atom is in progress, add this atom to the list */ + list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]); + kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type); + queued = 1; + } + } + + if (will_fail) { + if (!queued) { + ret = jd_done_nolock(katom, NULL); + + goto out; + } else { + katom->will_fail_event_code = katom->event_code; + ret = false; + + goto out; + } + } else { + /* These must occur after the above loop to ensure that an atom + * that depends on a previous atom with the same number behaves + * as expected */ + katom->event_code = BASE_JD_EVENT_DONE; + katom->status = KBASE_JD_ATOM_STATE_QUEUED; + } + + /* For invalid priority, be most lenient and choose the default */ + sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio); + if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID) + sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT; + katom->sched_priority = sched_prio; + + /* Create a new atom recording all dependencies it was set up with. */ + KBASE_TLSTREAM_TL_NEW_ATOM( + katom, + kbase_jd_atom_id(kctx, katom)); + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_IDLE); + KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(katom, katom->sched_priority); + KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx); + for (i = 0; i < 2; i++) + if (BASE_JD_DEP_TYPE_INVALID != kbase_jd_katom_dep_type( + &katom->dep[i])) { + KBASE_TLSTREAM_TL_DEP_ATOM_ATOM( + (void *)kbase_jd_katom_dep_atom( + &katom->dep[i]), + (void *)katom); + } else if (BASE_JD_DEP_TYPE_INVALID != + user_atom->pre_dep[i].dependency_type) { + /* Resolved dependency. */ + int dep_atom_number = + user_atom->pre_dep[i].atom_id; + struct kbase_jd_atom *dep_atom = + &jctx->atoms[dep_atom_number]; + + KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM( + (void *)dep_atom, + (void *)katom); + } + + /* Reject atoms with job chain = NULL, as these cause issues with soft-stop */ + if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) { + dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL"); + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + ret = jd_done_nolock(katom, NULL); + goto out; + } + + /* Reject atoms with an invalid device_nr */ + if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) && + (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) { + dev_warn(kctx->kbdev->dev, + "Rejecting atom with invalid device_nr %d", + katom->device_nr); + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + ret = jd_done_nolock(katom, NULL); + goto out; + } + + /* Reject atoms with invalid core requirements */ + if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) && + (katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) { + dev_warn(kctx->kbdev->dev, + "Rejecting atom with invalid core requirements"); + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE; + ret = jd_done_nolock(katom, NULL); + goto out; + } + + if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { + /* handle what we need to do to access the external resources */ + if (kbase_jd_pre_external_resources(katom, user_atom) != 0) { + /* setup failed (no access, bad resource, unknown resource types, etc.) 
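+ * As with the other rejection paths in this function, the atom is not
+ * queued: it is completed straight away with BASE_JD_EVENT_JOB_INVALID.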
*/ + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + ret = jd_done_nolock(katom, NULL); + goto out; + } + } + + /* Validate the atom. Function will return error if the atom is + * malformed. + * + * Soft-jobs never enter the job scheduler but have their own initialize method. + * + * If either fail then we immediately complete the atom with an error. + */ + if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) { + if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) { + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + ret = jd_done_nolock(katom, NULL); + goto out; + } + } else { + /* Soft-job */ + if (kbase_prepare_soft_job(katom) != 0) { + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + ret = jd_done_nolock(katom, NULL); + goto out; + } + } + +#ifdef CONFIG_GPU_TRACEPOINTS + katom->work_id = atomic_inc_return(&jctx->work_id); + trace_gpu_job_enqueue(kctx->id, katom->work_id, + kbasep_map_core_reqs_to_string(katom->core_req)); +#endif + + if (queued && !IS_GPU_ATOM(katom)) { + ret = false; + goto out; + } + +#ifdef CONFIG_MALI_DMA_FENCE + if (kbase_fence_dep_count_read(katom) != -1) { + ret = false; + goto out; + } +#endif /* CONFIG_MALI_DMA_FENCE */ + + if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) + == BASE_JD_REQ_SOFT_REPLAY) { + if (kbase_replay_process(katom)) + ret = false; + else + ret = jd_done_nolock(katom, NULL); + + goto out; + } else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) { + if (kbase_process_soft_job(katom) == 0) { + kbase_finish_soft_job(katom); + ret = jd_done_nolock(katom, NULL); + goto out; + } + + ret = false; + } else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) { + katom->status = KBASE_JD_ATOM_STATE_IN_JS; + ret = kbasep_js_add_job(kctx, katom); + /* If job was cancelled then resolve immediately */ + if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED) + ret = jd_done_nolock(katom, NULL); + } else { + /* This is a pure dependency. 
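+ * That is, its BASE_JD_REQ_ATOM_TYPE bits equal BASE_JD_REQ_DEP and there is
+ * no work to run on the GPU.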
Resolve it immediately */ + ret = jd_done_nolock(katom, NULL); + } + + out: + return ret; +} + +int kbase_jd_submit(struct kbase_context *kctx, + void __user *user_addr, u32 nr_atoms, u32 stride, + bool uk6_atom) +{ + struct kbase_jd_context *jctx = &kctx->jctx; + int err = 0; + int i; + bool need_to_try_schedule_context = false; + struct kbase_device *kbdev; + u32 latest_flush; + + /* + * kbase_jd_submit isn't expected to fail and so all errors with the + * jobs are reported by immediately failing them (through event system) + */ + kbdev = kctx->kbdev; + + beenthere(kctx, "%s", "Enter"); + + if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) { + dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it"); + return -EINVAL; + } + + if (stride != sizeof(base_jd_atom_v2)) { + dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel"); + return -EINVAL; + } + + KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(nr_atoms, + &kctx->timeline.jd_atoms_in_flight)); + + /* All atoms submitted in this call have the same flush ID */ + latest_flush = kbase_backend_get_current_flush_id(kbdev); + + for (i = 0; i < nr_atoms; i++) { + struct base_jd_atom_v2 user_atom; + struct kbase_jd_atom *katom; + + if (copy_from_user(&user_atom, user_addr, + sizeof(user_atom)) != 0) { + err = -EINVAL; + KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, + atomic_sub_return(nr_atoms - i, + &kctx->timeline.jd_atoms_in_flight)); + break; + } + +#ifdef BASE_LEGACY_UK10_2_SUPPORT + if (KBASE_API_VERSION(10, 3) > kctx->api_version) + user_atom.core_req = (u32)(user_atom.compat_core_req + & 0x7fff); +#endif /* BASE_LEGACY_UK10_2_SUPPORT */ + + user_addr = (void __user *)((uintptr_t) user_addr + stride); + + mutex_lock(&jctx->lock); +#ifndef compiletime_assert +#define compiletime_assert_defined +#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \ +while (false) +#endif + compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) == + BASE_JD_ATOM_COUNT, + "BASE_JD_ATOM_COUNT and base_atom_id type out of sync"); + compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) == + sizeof(user_atom.atom_number), + "BASE_JD_ATOM_COUNT and base_atom_id type out of sync"); +#ifdef compiletime_assert_defined +#undef compiletime_assert +#undef compiletime_assert_defined +#endif + katom = &jctx->atoms[user_atom.atom_number]; + + /* Record the flush ID for the cache flush optimisation */ + katom->flush_id = latest_flush; + + while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) { + /* Atom number is already in use, wait for the atom to + * complete + */ + mutex_unlock(&jctx->lock); + + /* This thread will wait for the atom to complete. Due + * to thread scheduling we are not sure that the other + * thread that owns the atom will also schedule the + * context, so we force the scheduler to be active and + * hence eventually schedule this context at some point + * later. + */ + kbase_js_sched_all(kbdev); + + if (wait_event_killable(katom->completed, + katom->status == + KBASE_JD_ATOM_STATE_UNUSED) != 0) { + /* We're being killed so the result code + * doesn't really matter + */ + return 0; + } + mutex_lock(&jctx->lock); + } + + need_to_try_schedule_context |= + jd_submit_atom(kctx, &user_atom, katom); + + /* Register a completed job as a disjoint event when the GPU is in a disjoint state + * (ie. being reset or replaying jobs). 
+ */ + kbase_disjoint_event_potential(kbdev); + + mutex_unlock(&jctx->lock); + } + + if (need_to_try_schedule_context) + kbase_js_sched_all(kbdev); + + return err; +} + +KBASE_EXPORT_TEST_API(kbase_jd_submit); + +void kbase_jd_done_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work); + struct kbase_jd_context *jctx; + struct kbase_context *kctx; + struct kbasep_js_kctx_info *js_kctx_info; + struct kbase_device *kbdev; + struct kbasep_js_device_data *js_devdata; + u64 cache_jc = katom->jc; + struct kbasep_js_atom_retained_state katom_retained_state; + bool context_idle; + base_jd_core_req core_req = katom->core_req; + u64 affinity = katom->affinity; + enum kbase_atom_coreref_state coreref_state = katom->coreref_state; + + /* Soft jobs should never reach this function */ + KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0); + + kctx = katom->kctx; + jctx = &kctx->jctx; + kbdev = kctx->kbdev; + js_kctx_info = &kctx->jctx.sched_info; + js_devdata = &kbdev->js_data; + + KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0); + + kbase_backend_complete_wq(kbdev, katom); + + /* + * Begin transaction on JD context and JS context + */ + mutex_lock(&jctx->lock); + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_DONE); + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + + /* This worker only gets called on contexts that are scheduled *in*. This is + * because it only happens in response to an IRQ from a job that was + * running. + */ + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + if (katom->event_code == BASE_JD_EVENT_STOPPED) { + /* Atom has been promoted to stopped */ + unsigned long flags; + + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + katom->status = KBASE_JD_ATOM_STATE_IN_JS; + kbase_js_unpull(kctx, katom); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&jctx->lock); + + return; + } + + if (katom->event_code != BASE_JD_EVENT_DONE) + dev_err(kbdev->dev, + "t6xx: GPU fault 0x%02lx from job slot %d\n", + (unsigned long)katom->event_code, + katom->slot_nr); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) + kbase_as_poking_timer_release_atom(kbdev, kctx, katom); + + /* Retain state before the katom disappears */ + kbasep_js_atom_retained_state_copy(&katom_retained_state, katom); + + context_idle = kbase_js_complete_atom_wq(kctx, katom); + + KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state)); + + kbasep_js_remove_job(kbdev, kctx, katom); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF; + /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */ + jd_done_nolock(katom, &kctx->completed_jobs); + + /* katom may have been freed now, do not use! */ + + if (context_idle) { + unsigned long flags; + + context_idle = false; + mutex_lock(&js_devdata->queue_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* If kbase_sched() has scheduled this context back in then + * KCTX_ACTIVE will have been set after we marked it as + * inactive, and another pm reference will have been taken, so + * drop our reference. But do not call kbase_jm_idle_ctx(), as + * the context is active and fast-starting is allowed. 
+ * + * If an atom has been fast-started then kctx->atoms_pulled will + * be non-zero but KCTX_ACTIVE will still be false (as the + * previous pm reference has been inherited). Do NOT drop our + * reference, as it has been re-used, and leave the context as + * active. + * + * If no new atoms have been started then KCTX_ACTIVE will still + * be false and atoms_pulled will be zero, so drop the reference + * and call kbase_jm_idle_ctx(). + * + * As the checks are done under both the queue_mutex and + * hwaccess_lock is should be impossible for this to race + * with the scheduler code. + */ + if (kbase_ctx_flag(kctx, KCTX_ACTIVE) || + !atomic_read(&kctx->atoms_pulled)) { + /* Calling kbase_jm_idle_ctx() here will ensure that + * atoms are not fast-started when we drop the + * hwaccess_lock. This is not performed if + * KCTX_ACTIVE is set as in that case another pm + * reference has been taken and a fast-start would be + * valid. + */ + if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) + kbase_jm_idle_ctx(kbdev, kctx); + context_idle = true; + } else { + kbase_ctx_flag_set(kctx, KCTX_ACTIVE); + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&js_devdata->queue_mutex); + } + + /* + * Transaction complete + */ + mutex_unlock(&jctx->lock); + + /* Job is now no longer running, so can now safely release the context + * reference, and handle any actions that were logged against the atom's retained state */ + + kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state); + + kbase_js_sched_all(kbdev); + + if (!atomic_dec_return(&kctx->work_count)) { + /* If worker now idle then post all events that jd_done_nolock() + * has queued */ + mutex_lock(&jctx->lock); + while (!list_empty(&kctx->completed_jobs)) { + struct kbase_jd_atom *atom = list_entry( + kctx->completed_jobs.next, + struct kbase_jd_atom, jd_item); + list_del(kctx->completed_jobs.next); + + kbase_event_post(kctx, atom); + } + mutex_unlock(&jctx->lock); + } + + kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity, + coreref_state); + + if (context_idle) + kbase_pm_context_idle(kbdev); + + KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0); +} + +/** + * jd_cancel_worker - Work queue job cancel function. + * @data: a &struct work_struct + * + * Only called as part of 'Zapping' a context (which occurs on termination). + * Operates serially with the kbase_jd_done_worker() on the work queue. + * + * This can only be called on contexts that aren't scheduled. + * + * We don't need to release most of the resources that would occur on + * kbase_jd_done() or kbase_jd_done_worker(), because the atoms here must not be + * running (by virtue of only being called on contexts that aren't + * scheduled). + */ +static void jd_cancel_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work); + struct kbase_jd_context *jctx; + struct kbase_context *kctx; + struct kbasep_js_kctx_info *js_kctx_info; + bool need_to_try_schedule_context; + bool attr_state_changed; + struct kbase_device *kbdev; + + /* Soft jobs should never reach this function */ + KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0); + + kctx = katom->kctx; + kbdev = kctx->kbdev; + jctx = &kctx->jctx; + js_kctx_info = &kctx->jctx.sched_info; + + KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0); + + /* This only gets called on contexts that are scheduled out. 
Hence, we must + * make sure we don't de-ref the number of running jobs (there aren't + * any), nor must we try to schedule out the context (it's already + * scheduled out). + */ + KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + /* Scheduler: Remove the job from the system */ + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + mutex_lock(&jctx->lock); + + need_to_try_schedule_context = jd_done_nolock(katom, NULL); + /* Because we're zapping, we're not adding any more jobs to this ctx, so no need to + * schedule the context. There's also no need for the jsctx_mutex to have been taken + * around this too. */ + KBASE_DEBUG_ASSERT(!need_to_try_schedule_context); + + /* katom may have been freed now, do not use! */ + mutex_unlock(&jctx->lock); + + if (attr_state_changed) + kbase_js_sched_all(kbdev); +} + +/** + * kbase_jd_done - Complete a job that has been removed from the Hardware + * @katom: atom which has been completed + * @slot_nr: slot the atom was on + * @end_timestamp: completion time + * @done_code: completion code + * + * This must be used whenever a job has been removed from the Hardware, e.g.: + * An IRQ indicates that the job finished (for both error and 'done' codes), or + * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop. + * + * Some work is carried out immediately, and the rest is deferred onto a + * workqueue + * + * Context: + * This can be called safely from atomic context. + * The caller must hold kbdev->hwaccess_lock + */ +void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, + ktime_t *end_timestamp, kbasep_js_atom_done_code done_code) +{ + struct kbase_context *kctx; + struct kbase_device *kbdev; + + KBASE_DEBUG_ASSERT(katom); + kctx = katom->kctx; + KBASE_DEBUG_ASSERT(kctx); + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev); + + if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) + katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT; + + KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0); + + kbase_job_check_leave_disjoint(kbdev, katom); + + katom->slot_nr = slot_nr; + + atomic_inc(&kctx->work_count); + +#ifdef CONFIG_DEBUG_FS + /* a failed job happened and is waiting for dumping*/ + if (!katom->will_fail_event_code && + kbase_debug_job_fault_process(katom, katom->event_code)) + return; +#endif + + WARN_ON(work_pending(&katom->work)); + INIT_WORK(&katom->work, kbase_jd_done_worker); + queue_work(kctx->jctx.job_done_wq, &katom->work); +} + +KBASE_EXPORT_TEST_API(kbase_jd_done); + +void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx; + + KBASE_DEBUG_ASSERT(NULL != kbdev); + KBASE_DEBUG_ASSERT(NULL != katom); + kctx = katom->kctx; + KBASE_DEBUG_ASSERT(NULL != kctx); + + KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0); + + /* This should only be done from a context that is not scheduled */ + KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + WARN_ON(work_pending(&katom->work)); + + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + INIT_WORK(&katom->work, jd_cancel_worker); + queue_work(kctx->jctx.job_done_wq, &katom->work); +} + + +void kbase_jd_zap_context(struct kbase_context *kctx) +{ + struct kbase_jd_atom *katom; + struct list_head *entry, *tmp; + struct kbase_device *kbdev; + + KBASE_DEBUG_ASSERT(kctx); + + kbdev = kctx->kbdev; + + KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u); + + 
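+ /*
+ * Zap sequence: first have the job scheduler evict everything this
+ * context has queued or running, then (under the jctx lock below) clean
+ * up soft jobs and dma-fence waiters that live outside the scheduler,
+ * and finally wait for the context's job count to drain to zero.
+ */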
kbase_js_zap_context(kctx); + + mutex_lock(&kctx->jctx.lock); + + /* + * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are + * queued outside the job scheduler. + */ + + del_timer_sync(&kctx->soft_job_timeout); + list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) { + katom = list_entry(entry, struct kbase_jd_atom, queue); + kbase_cancel_soft_job(katom); + } + + +#ifdef CONFIG_MALI_DMA_FENCE + kbase_dma_fence_cancel_all_atoms(kctx); +#endif + + mutex_unlock(&kctx->jctx.lock); + +#ifdef CONFIG_MALI_DMA_FENCE + /* Flush dma-fence workqueue to ensure that any callbacks that may have + * been queued are done before continuing. + */ + flush_workqueue(kctx->dma_fence.wq); +#endif + + kbase_jm_wait_for_zero_jobs(kctx); +} + +KBASE_EXPORT_TEST_API(kbase_jd_zap_context); + +int kbase_jd_init(struct kbase_context *kctx) +{ + int i; + int mali_err = 0; + + KBASE_DEBUG_ASSERT(kctx); + + kctx->jctx.job_done_wq = alloc_workqueue("mali_jd", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (NULL == kctx->jctx.job_done_wq) { + mali_err = -ENOMEM; + goto out1; + } + + for (i = 0; i < BASE_JD_ATOM_COUNT; i++) { + init_waitqueue_head(&kctx->jctx.atoms[i].completed); + + INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]); + INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]); + + /* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */ + kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID; + kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED; + +#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE) + kctx->jctx.atoms[i].dma_fence.context = + dma_fence_context_alloc(1); + atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0); + INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks); +#endif + } + + mutex_init(&kctx->jctx.lock); + + init_waitqueue_head(&kctx->jctx.zero_jobs_wait); + + spin_lock_init(&kctx->jctx.tb_lock); + + kctx->jctx.job_nr = 0; + INIT_LIST_HEAD(&kctx->completed_jobs); + atomic_set(&kctx->work_count, 0); + + return 0; + + out1: + return mali_err; +} + +KBASE_EXPORT_TEST_API(kbase_jd_init); + +void kbase_jd_exit(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx); + + /* Work queue is emptied by this */ + destroy_workqueue(kctx->jctx.job_done_wq); +} + +KBASE_EXPORT_TEST_API(kbase_jd_exit); diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c new file mode 100644 index 00000000000000..25f1ed5b55c840 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c @@ -0,0 +1,236 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#include +#endif +#include + +struct kbase_jd_debugfs_depinfo { + u8 id; + char type; +}; + +static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom, + struct seq_file *sfile) +{ +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + struct kbase_sync_fence_info info; + int res; + + switch (atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_FENCE_TRIGGER: + res = kbase_sync_fence_out_info_get(atom, &info); + if (0 == res) { + seq_printf(sfile, "Sa([%p]%d) ", + info.fence, info.status); + break; + } + case BASE_JD_REQ_SOFT_FENCE_WAIT: + res = kbase_sync_fence_in_info_get(atom, &info); + if (0 == res) { + seq_printf(sfile, "Wa([%p]%d) ", + info.fence, info.status); + break; + } + default: + break; + } +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + +#ifdef CONFIG_MALI_DMA_FENCE + if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { + struct kbase_fence_cb *cb; + + if (atom->dma_fence.fence) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence = atom->dma_fence.fence; +#else + struct dma_fence *fence = atom->dma_fence.fence; +#endif + + seq_printf(sfile, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + "Sd(%u#%u: %s) ", +#else + "Sd(%llu#%u: %s) ", +#endif + fence->context, + fence->seqno, + dma_fence_is_signaled(fence) ? + "signaled" : "active"); + } + + list_for_each_entry(cb, &atom->dma_fence.callbacks, + node) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence = cb->fence; +#else + struct dma_fence *fence = cb->fence; +#endif + + seq_printf(sfile, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + "Wd(%u#%u: %s) ", +#else + "Wd(%llu#%u: %s) ", +#endif + fence->context, + fence->seqno, + dma_fence_is_signaled(fence) ? + "signaled" : "active"); + } + } +#endif /* CONFIG_MALI_DMA_FENCE */ + +} + +static void kbasep_jd_debugfs_atom_deps( + struct kbase_jd_debugfs_depinfo *deps, + struct kbase_jd_atom *atom) +{ + struct kbase_context *kctx = atom->kctx; + int i; + + for (i = 0; i < 2; i++) { + deps[i].id = (unsigned)(atom->dep[i].atom ? + kbase_jd_atom_id(kctx, atom->dep[i].atom) : 0); + + switch (atom->dep[i].dep_type) { + case BASE_JD_DEP_TYPE_INVALID: + deps[i].type = ' '; + break; + case BASE_JD_DEP_TYPE_DATA: + deps[i].type = 'D'; + break; + case BASE_JD_DEP_TYPE_ORDER: + deps[i].type = '>'; + break; + default: + deps[i].type = '?'; + break; + } + } +} +/** + * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file. + * @sfile: The debugfs entry + * @data: Data associated with the entry + * + * This function is called to get the contents of the JD atoms debugfs file. 
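+ * Both kctx->jctx.lock and kbdev->hwaccess_lock are held while the atom
+ * array is walked, so the JD and JS state of each atom are sampled
+ * consistently.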
+ * This is a report of all atoms managed by kbase_jd_context.atoms + * + * Return: 0 if successfully prints data in debugfs entry file, failure + * otherwise + */ +static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data) +{ + struct kbase_context *kctx = sfile->private; + struct kbase_jd_atom *atoms; + unsigned long irq_flags; + int i; + + KBASE_DEBUG_ASSERT(kctx != NULL); + + /* Print version */ + seq_printf(sfile, "v%u\n", MALI_JD_DEBUGFS_VERSION); + + /* Print U/K API version */ + seq_printf(sfile, "ukv%u.%u\n", BASE_UK_VERSION_MAJOR, + BASE_UK_VERSION_MINOR); + + /* Print table heading */ + seq_puts(sfile, " ID, Core req, St, CR, Predeps, Start time, Additional info...\n"); + + atoms = kctx->jctx.atoms; + /* General atom states */ + mutex_lock(&kctx->jctx.lock); + /* JS-related states */ + spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags); + for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) { + struct kbase_jd_atom *atom = &atoms[i]; + s64 start_timestamp = 0; + struct kbase_jd_debugfs_depinfo deps[2]; + + if (atom->status == KBASE_JD_ATOM_STATE_UNUSED) + continue; + + /* start_timestamp is cleared as soon as the atom leaves UNUSED state + * and set before a job is submitted to the h/w, a non-zero value means + * it is valid */ + if (ktime_to_ns(atom->start_timestamp)) + start_timestamp = ktime_to_ns( + ktime_sub(ktime_get(), atom->start_timestamp)); + + kbasep_jd_debugfs_atom_deps(deps, atom); + + seq_printf(sfile, + "%3u, %8x, %2u, %2u, %c%3u %c%3u, %20lld, ", + i, atom->core_req, atom->status, + atom->coreref_state, + deps[0].type, deps[0].id, + deps[1].type, deps[1].id, + start_timestamp); + + + kbase_jd_debugfs_fence_info(atom, sfile); + + seq_puts(sfile, "\n"); + } + spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags); + mutex_unlock(&kctx->jctx.lock); + + return 0; +} + + +/** + * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file + * @in: &struct inode pointer + * @file: &struct file pointer + * + * Return: file descriptor + */ +static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file) +{ + return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private); +} + +static const struct file_operations kbasep_jd_debugfs_atoms_fops = { + .open = kbasep_jd_debugfs_atoms_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx != NULL); + + /* Expose all atoms */ + debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx, + &kbasep_jd_debugfs_atoms_fops); + +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h new file mode 100644 index 00000000000000..fae32919b22f19 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h @@ -0,0 +1,40 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * @file mali_kbase_jd_debugfs.h + * Header file for job dispatcher-related entries in debugfs + */ + +#ifndef _KBASE_JD_DEBUGFS_H +#define _KBASE_JD_DEBUGFS_H + +#include + +#define MALI_JD_DEBUGFS_VERSION 2 + +/* Forward declarations */ +struct kbase_context; + +/** + * kbasep_jd_debugfs_ctx_init() - Add debugfs entries for JD system + * + * @kctx Pointer to kbase_context + */ +void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx); + +#endif /*_KBASE_JD_DEBUGFS_H*/ diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/midgard/mali_kbase_jm.c new file mode 100644 index 00000000000000..0c5c6a6f78cb39 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_jm.c @@ -0,0 +1,131 @@ +/* + * + * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * HW access job manager common APIs + */ + +#include +#include "mali_kbase_hwaccess_jm.h" +#include "mali_kbase_jm.h" + +/** + * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on slot + * @js on the active context. + * @kbdev: Device pointer + * @js: Job slot to run on + * @nr_jobs_to_submit: Number of jobs to attempt to submit + * + * Return: true if slot can still be submitted on, false if slot is now full. + */ +static bool kbase_jm_next_job(struct kbase_device *kbdev, int js, + int nr_jobs_to_submit) +{ + struct kbase_context *kctx; + int i; + + kctx = kbdev->hwaccess.active_kctx; + + if (!kctx) + return true; + + for (i = 0; i < nr_jobs_to_submit; i++) { + struct kbase_jd_atom *katom = kbase_js_pull(kctx, js); + + if (!katom) + return true; /* Context has no jobs on this slot */ + + kbase_backend_run_atom(kbdev, katom); + } + + return false; /* Slot ringbuffer should now be full */ +} + +u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask) +{ + u32 ret_mask = 0; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + while (js_mask) { + int js = ffs(js_mask) - 1; + int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js); + + if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit)) + ret_mask |= (1 << js); + + js_mask &= ~(1 << js); + } + + return ret_mask; +} + +void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!down_trylock(&js_devdata->schedule_sem)) { + kbase_jm_kick(kbdev, js_mask); + up(&js_devdata->schedule_sem); + } +} + +void kbase_jm_try_kick_all(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!down_trylock(&js_devdata->schedule_sem)) { + kbase_jm_kick_all(kbdev); + up(&js_devdata->schedule_sem); + } +} + +void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (kbdev->hwaccess.active_kctx == kctx) + kbdev->hwaccess.active_kctx = NULL; +} + +struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); 
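+ /*
+ * A stopped or evicted atom is handed back to the scheduler's queue via
+ * kbase_js_unpull(); any other completion code means the atom is done,
+ * and kbase_js_complete_atom() may return a dependent atom that has now
+ * become runnable.
+ */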
+ + if (katom->event_code != BASE_JD_EVENT_STOPPED && + katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) { + return kbase_js_complete_atom(katom, NULL); + } else { + kbase_js_unpull(katom->kctx, katom); + return NULL; + } +} + +struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, ktime_t *end_timestamp) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + return kbase_js_complete_atom(katom, end_timestamp); +} + diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.h b/drivers/gpu/arm/midgard/mali_kbase_jm.h new file mode 100644 index 00000000000000..a74ee24c8058fa --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_jm.h @@ -0,0 +1,110 @@ +/* + * + * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +/* + * Job manager common APIs + */ + +#ifndef _KBASE_JM_H_ +#define _KBASE_JM_H_ + +/** + * kbase_jm_kick() - Indicate that there are jobs ready to run. + * @kbdev: Device pointer + * @js_mask: Mask of the job slots that can be pulled from. + * + * Caller must hold the hwaccess_lock and schedule_sem semaphore + * + * Return: Mask of the job slots that can still be submitted to. + */ +u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask); + +/** + * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job + * slots. + * @kbdev: Device pointer + * + * Caller must hold the hwaccess_lock and schedule_sem semaphore + * + * Return: Mask of the job slots that can still be submitted to. + */ +static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev) +{ + return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1); +} + +/** + * kbase_jm_try_kick - Attempt to call kbase_jm_kick + * @kbdev: Device pointer + * @js_mask: Mask of the job slots that can be pulled from + * Context: Caller must hold hwaccess_lock + * + * If schedule_sem can be immediately obtained then this function will call + * kbase_jm_kick() otherwise it will do nothing. + */ +void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask); + +/** + * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all + * @kbdev: Device pointer + * Context: Caller must hold hwaccess_lock + * + * If schedule_sem can be immediately obtained then this function will call + * kbase_jm_kick_all() otherwise it will do nothing. + */ +void kbase_jm_try_kick_all(struct kbase_device *kbdev); + +/** + * kbase_jm_idle_ctx() - Mark a context as idle. + * @kbdev: Device pointer + * @kctx: Context to mark as idle + * + * No more atoms will be pulled from this context until it is marked as active + * by kbase_js_use_ctx(). + * + * The context should have no atoms currently pulled from it + * (kctx->atoms_pulled == 0). 
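+ * kbase_jd_done_worker() is one such caller: it only idles a context once
+ * KCTX_ACTIVE is clear and atomic_read(&kctx->atoms_pulled) has dropped to
+ * zero.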
+ * + * Caller must hold the hwaccess_lock + */ +void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has + * been soft-stopped or will fail due to a + * dependency + * @kbdev: Device pointer + * @katom: Atom that has been stopped or will be failed + * + * Return: Atom that has now been unblocked and can now be run, or NULL if none + */ +struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +/** + * kbase_jm_complete() - Complete an atom + * @kbdev: Device pointer + * @katom: Atom that has completed + * @end_timestamp: Timestamp of atom completion + * + * Return: Atom that has now been unblocked and can now be run, or NULL if none + */ +struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev, + struct kbase_jd_atom *katom, ktime_t *end_timestamp); + +#endif /* _KBASE_JM_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.c b/drivers/gpu/arm/midgard/mali_kbase_js.c new file mode 100644 index 00000000000000..a3e8248cfb770d --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_js.c @@ -0,0 +1,2789 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* + * Job Scheduler Implementation + */ +#include +#include +#if defined(CONFIG_MALI_GATOR_SUPPORT) +#include +#endif +#include +#include +#include + +#include +#include + +#include "mali_kbase_jm.h" +#include "mali_kbase_hwaccess_jm.h" + +/* + * Private types + */ + +/* Bitpattern indicating the result of releasing a context */ +enum { + /* The context was descheduled - caller should try scheduling in a new + * one to keep the runpool full */ + KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0), + /* Ctx attributes were changed - caller should try scheduling all + * contexts */ + KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1) +}; + +typedef u32 kbasep_js_release_result; + +const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = { + KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */ + KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */ + KBASE_JS_ATOM_SCHED_PRIO_LOW /* BASE_JD_PRIO_LOW */ +}; + +const base_jd_prio +kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = { + BASE_JD_PRIO_HIGH, /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */ + BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */ + BASE_JD_PRIO_LOW /* KBASE_JS_ATOM_SCHED_PRIO_LOW */ +}; + + +/* + * Private function prototypes + */ +static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal( + struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbasep_js_atom_retained_state *katom_retained_state); + +static int kbase_js_get_slot(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, + kbasep_js_ctx_job_cb callback); + +/* Helper for trace subcodes */ +#if KBASE_TRACE_ENABLE +static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + return 
atomic_read(&kctx->refcount); +} +#else /* KBASE_TRACE_ENABLE */ +static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + CSTD_UNUSED(kbdev); + CSTD_UNUSED(kctx); + return 0; +} +#endif /* KBASE_TRACE_ENABLE */ + +/* + * Private functions + */ + +/** + * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements + * @features: JSn_FEATURE register value + * + * Given a JSn_FEATURE register value returns the core requirements that match + * + * Return: Core requirement bit mask + */ +static base_jd_core_req core_reqs_from_jsn_features(u16 features) +{ + base_jd_core_req core_req = 0u; + + if ((features & JS_FEATURE_SET_VALUE_JOB) != 0) + core_req |= BASE_JD_REQ_V; + + if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0) + core_req |= BASE_JD_REQ_CF; + + if ((features & JS_FEATURE_COMPUTE_JOB) != 0) + core_req |= BASE_JD_REQ_CS; + + if ((features & JS_FEATURE_TILER_JOB) != 0) + core_req |= BASE_JD_REQ_T; + + if ((features & JS_FEATURE_FRAGMENT_JOB) != 0) + core_req |= BASE_JD_REQ_FS; + + return core_req; +} + +static void kbase_js_sync_timers(struct kbase_device *kbdev) +{ + mutex_lock(&kbdev->js_data.runpool_mutex); + kbase_backend_ctx_count_changed(kbdev); + mutex_unlock(&kbdev->js_data.runpool_mutex); +} + +/* Hold the mmu_hw_mutex and hwaccess_lock for this */ +bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + bool result = false; + int as_nr; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + as_nr = kctx->as_nr; + if (atomic_read(&kctx->refcount) > 0) { + KBASE_DEBUG_ASSERT(as_nr >= 0); + + kbase_ctx_sched_retain_ctx_refcount(kctx); + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx, + NULL, 0u, atomic_read(&kctx->refcount)); + result = true; + } + + return result; +} + +/** + * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms + * @kctx: Pointer to kbase context with ring buffer. + * @js: Job slot id to check. + * @prio: Priority to check. + * + * Return true if there are no atoms to pull. There may be running atoms in the + * ring buffer even if there are no atoms to pull. It is also possible for the + * ring buffer to be full (with running atoms) when this functions returns + * true. + * + * Return: true if there are no atoms to pull, false otherwise. + */ +static inline bool +jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio) +{ + struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js]; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + return RB_EMPTY_ROOT(&rb->runnable_tree); +} + +/** + * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no + * pullable atoms + * @kctx: Pointer to kbase context with ring buffer. + * @js: Job slot id to check. + * + * Caller must hold hwaccess_lock + * + * Return: true if the ring buffers for all priorities have no pullable atoms, + * false otherwise. + */ +static inline bool +jsctx_rb_none_to_pull(struct kbase_context *kctx, int js) +{ + int prio; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) { + if (!jsctx_rb_none_to_pull_prio(kctx, js, prio)) + return false; + } + + return true; +} + +/** + * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue. + * @kctx: Pointer to kbase context with the queue. 
+ * @js: Job slot id to iterate. + * @prio: Priority id to iterate. + * @callback: Function pointer to callback. + * + * Iterate over a queue and invoke @callback for each entry in the queue, and + * remove the entry from the queue. + * + * If entries are added to the queue while this is running those entries may, or + * may not be covered. To ensure that all entries in the buffer have been + * enumerated when this function returns jsctx->lock must be held when calling + * this function. + * + * The HW access lock must always be held when calling this function. + */ +static void +jsctx_queue_foreach_prio(struct kbase_context *kctx, int js, int prio, + kbasep_js_ctx_job_cb callback) +{ + struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js]; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + while (!RB_EMPTY_ROOT(&queue->runnable_tree)) { + struct rb_node *node = rb_first(&queue->runnable_tree); + struct kbase_jd_atom *entry = rb_entry(node, + struct kbase_jd_atom, runnable_tree_node); + + rb_erase(node, &queue->runnable_tree); + callback(kctx->kbdev, entry); + } + + while (!list_empty(&queue->x_dep_head)) { + struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next, + struct kbase_jd_atom, queue); + + list_del(queue->x_dep_head.next); + + callback(kctx->kbdev, entry); + } +} + +/** + * jsctx_queue_foreach(): - Execute callback for each entry in every queue + * @kctx: Pointer to kbase context with queue. + * @js: Job slot id to iterate. + * @callback: Function pointer to callback. + * + * Iterate over all the different priorities, and for each call + * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback + * for each entry, and remove the entry from the queue. + */ +static inline void +jsctx_queue_foreach(struct kbase_context *kctx, int js, + kbasep_js_ctx_job_cb callback) +{ + int prio; + + for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) + jsctx_queue_foreach_prio(kctx, js, prio, callback); +} + +/** + * jsctx_rb_peek_prio(): - Check buffer and get next atom + * @kctx: Pointer to kbase context with ring buffer. + * @js: Job slot id to check. + * @prio: Priority id to check. + * + * Check the ring buffer for the specified @js and @prio and return a pointer to + * the next atom, unless the ring buffer is empty. + * + * Return: Pointer to next atom in buffer, or NULL if there is no atom. + */ +static inline struct kbase_jd_atom * +jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio) +{ + struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js]; + struct rb_node *node; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + node = rb_first(&rb->runnable_tree); + if (!node) + return NULL; + + return rb_entry(node, struct kbase_jd_atom, runnable_tree_node); +} + +/** + * jsctx_rb_peek(): - Check all priority buffers and get next atom + * @kctx: Pointer to kbase context with ring buffer. + * @js: Job slot id to check. + * + * Check the ring buffers for all priorities, starting from + * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and @prio and return a + * pointer to the next atom, unless all the priority's ring buffers are empty. + * + * Caller must hold the hwaccess_lock. + * + * Return: Pointer to next atom in buffer, or NULL if there is no atom. 
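+ *
+ * As a sketch of the intended pairing with jsctx_rb_pull() (the
+ * atom_can_run_now() check is illustrative, not a real helper):
+ *
+ *   katom = jsctx_rb_peek(kctx, js);
+ *   if (katom && atom_can_run_now(katom))
+ *           jsctx_rb_pull(kctx, katom);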
+ */ +static inline struct kbase_jd_atom * +jsctx_rb_peek(struct kbase_context *kctx, int js) +{ + int prio; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) { + struct kbase_jd_atom *katom; + + katom = jsctx_rb_peek_prio(kctx, js, prio); + if (katom) + return katom; + } + + return NULL; +} + +/** + * jsctx_rb_pull(): - Mark atom in list as running + * @kctx: Pointer to kbase context with ring buffer. + * @katom: Pointer to katom to pull. + * + * Mark an atom previously obtained from jsctx_rb_peek() as running. + * + * @katom must currently be at the head of the ring buffer. + */ +static inline void +jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + int prio = katom->sched_priority; + int js = katom->slot_nr; + struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js]; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + /* Atoms must be pulled in the correct order. */ + WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio)); + + rb_erase(&katom->runnable_tree_node, &rb->runnable_tree); +} + +#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0) + +static void +jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + int prio = katom->sched_priority; + int js = katom->slot_nr; + struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js]; + struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + while (*new) { + struct kbase_jd_atom *entry = container_of(*new, + struct kbase_jd_atom, runnable_tree_node); + + parent = *new; + if (LESS_THAN_WRAP(katom->age, entry->age)) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&katom->runnable_tree_node, parent, new); + rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree); +} + +/** + * jsctx_rb_unpull(): - Undo marking of atom in list as running + * @kctx: Pointer to kbase context with ring buffer. + * @katom: Pointer to katom to unpull. + * + * Undo jsctx_rb_pull() and put @katom back in the queue. + * + * jsctx_rb_unpull() must be called on atoms in the same order the atoms were + * pulled. 
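+ *
+ * Internally this re-inserts @katom into the runnable tree, which is kept
+ * ordered by atom age, so an unpulled atom is returned by jsctx_rb_peek()
+ * again ahead of younger atoms at the same priority.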
+ */ +static inline void +jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + jsctx_tree_add(kctx, katom); +} + +static bool kbase_js_ctx_pullable(struct kbase_context *kctx, + int js, + bool is_scheduled); +static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js); +static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js); + +/* + * Functions private to KBase ('Protected' functions) + */ +int kbasep_js_devdata_init(struct kbase_device * const kbdev) +{ + struct kbasep_js_device_data *jsdd; + int i; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + jsdd = &kbdev->js_data; + +#ifdef CONFIG_MALI_DEBUG + /* Soft-stop will be disabled on a single context by default unless + * softstop_always is set */ + jsdd->softstop_always = false; +#endif /* CONFIG_MALI_DEBUG */ + jsdd->nr_all_contexts_running = 0; + jsdd->nr_user_contexts_running = 0; + jsdd->nr_contexts_pullable = 0; + atomic_set(&jsdd->nr_contexts_runnable, 0); + /* No ctx allowed to submit */ + jsdd->runpool_irq.submit_allowed = 0u; + memset(jsdd->runpool_irq.ctx_attr_ref_count, 0, + sizeof(jsdd->runpool_irq.ctx_attr_ref_count)); + memset(jsdd->runpool_irq.slot_affinities, 0, + sizeof(jsdd->runpool_irq.slot_affinities)); + memset(jsdd->runpool_irq.slot_affinity_refcount, 0, + sizeof(jsdd->runpool_irq.slot_affinity_refcount)); + INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list); + + /* Config attributes */ + jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS; + jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS; + jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL; + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) + jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408; + else + jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS; + jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL; + jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING; + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) + jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408; + else + jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS; + jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL; + jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING; + jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS; + atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT); + + dev_dbg(kbdev->dev, "JS Config Attribs: "); + dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u", + jsdd->scheduling_period_ns); + dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u", + jsdd->soft_stop_ticks); + dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u", + jsdd->soft_stop_ticks_cl); + dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u", + jsdd->hard_stop_ticks_ss); + dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u", + jsdd->hard_stop_ticks_cl); + dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u", + jsdd->hard_stop_ticks_dumping); + dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u", + jsdd->gpu_reset_ticks_ss); + dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u", + jsdd->gpu_reset_ticks_cl); + dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u", + jsdd->gpu_reset_ticks_dumping); + dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u", + jsdd->ctx_timeslice_ns); + dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i", + atomic_read(&jsdd->soft_job_timeout_ms)); + + if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss && + jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss && + 
jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping && + jsdd->hard_stop_ticks_dumping < + jsdd->gpu_reset_ticks_dumping)) { + dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n"); + return -EINVAL; + } + +#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS + dev_dbg(kbdev->dev, "Job Scheduling Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", + jsdd->soft_stop_ticks, + jsdd->scheduling_period_ns); +#endif +#if KBASE_DISABLE_SCHEDULING_HARD_STOPS + dev_dbg(kbdev->dev, "Job Scheduling Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.", + jsdd->hard_stop_ticks_ss, + jsdd->hard_stop_ticks_dumping, + jsdd->scheduling_period_ns); +#endif +#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS + dev_dbg(kbdev->dev, "Note: The JS tick timer (if coded) will still be run, but do nothing."); +#endif + + for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) + jsdd->js_reqs[i] = core_reqs_from_jsn_features( + kbdev->gpu_props.props.raw_props.js_features[i]); + + /* On error, we could continue on: providing none of the below resources + * rely on the ones above */ + + mutex_init(&jsdd->runpool_mutex); + mutex_init(&jsdd->queue_mutex); + spin_lock_init(&kbdev->hwaccess_lock); + sema_init(&jsdd->schedule_sem, 1); + + for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) { + INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]); + INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]); + } + + return 0; +} + +void kbasep_js_devdata_halt(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +void kbasep_js_devdata_term(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata; + s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, }; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + js_devdata = &kbdev->js_data; + + /* The caller must de-register all contexts before calling this + */ + KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0); + KBASE_DEBUG_ASSERT(memcmp( + js_devdata->runpool_irq.ctx_attr_ref_count, + zero_ctx_attr_ref_count, + sizeof(zero_ctx_attr_ref_count)) == 0); + CSTD_UNUSED(zero_ctx_attr_ref_count); +} + +int kbasep_js_kctx_init(struct kbase_context * const kctx) +{ + struct kbase_device *kbdev; + struct kbasep_js_kctx_info *js_kctx_info; + int i, j; + + KBASE_DEBUG_ASSERT(kctx != NULL); + + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev != NULL); + + for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i) + INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]); + + js_kctx_info = &kctx->jctx.sched_info; + + js_kctx_info->ctx.nr_jobs = 0; + kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED); + kbase_ctx_flag_clear(kctx, KCTX_DYING); + memset(js_kctx_info->ctx.ctx_attr_ref_count, 0, + sizeof(js_kctx_info->ctx.ctx_attr_ref_count)); + + /* Initially, the context is disabled from submission until the create + * flags are set */ + kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED); + + /* On error, we could continue on: providing none of the below resources + * rely on the ones above */ + mutex_init(&js_kctx_info->ctx.jsctx_mutex); + + init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait); + + for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) { + for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) { + INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head); + kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT; + } + } + + return 0; +} + +void kbasep_js_kctx_term(struct 
kbase_context *kctx) +{ + struct kbase_device *kbdev; + struct kbasep_js_kctx_info *js_kctx_info; + int js; + bool update_ctx_count = false; + + KBASE_DEBUG_ASSERT(kctx != NULL); + + kbdev = kctx->kbdev; + KBASE_DEBUG_ASSERT(kbdev != NULL); + + js_kctx_info = &kctx->jctx.sched_info; + + /* The caller must de-register all jobs before calling this */ + KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0); + + mutex_lock(&kbdev->js_data.queue_mutex); + mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) + list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + + if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) { + WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0); + atomic_dec(&kbdev->js_data.nr_contexts_runnable); + update_ctx_count = true; + kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF); + } + + mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + mutex_unlock(&kbdev->js_data.queue_mutex); + + if (update_ctx_count) { + mutex_lock(&kbdev->js_data.runpool_mutex); + kbase_backend_ctx_count_changed(kbdev); + mutex_unlock(&kbdev->js_data.runpool_mutex); + } +} + +/** + * kbase_js_ctx_list_add_pullable_nolock - Variant of + * kbase_jd_ctx_list_add_pullable() + * where the caller must hold + * hwaccess_lock + * @kbdev: Device pointer + * @kctx: Context to add to queue + * @js: Job slot to use + * + * Caller must hold hwaccess_lock + * + * Return: true if caller should call kbase_backend_ctx_count_changed() + */ +static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js) +{ + bool ret = false; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])) + list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + + list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], + &kbdev->js_data.ctx_list_pullable[js]); + + if (!kctx->slots_pullable) { + kbdev->js_data.nr_contexts_pullable++; + ret = true; + if (!atomic_read(&kctx->atoms_pulled)) { + WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF); + atomic_inc(&kbdev->js_data.nr_contexts_runnable); + } + } + kctx->slots_pullable |= (1 << js); + + return ret; +} + +/** + * kbase_js_ctx_list_add_pullable_head_nolock - Variant of + * kbase_js_ctx_list_add_pullable_head() + * where the caller must hold + * hwaccess_lock + * @kbdev: Device pointer + * @kctx: Context to add to queue + * @js: Job slot to use + * + * Caller must hold hwaccess_lock + * + * Return: true if caller should call kbase_backend_ctx_count_changed() + */ +static bool kbase_js_ctx_list_add_pullable_head_nolock( + struct kbase_device *kbdev, struct kbase_context *kctx, int js) +{ + bool ret = false; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])) + list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + + list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], + &kbdev->js_data.ctx_list_pullable[js]); + + if (!kctx->slots_pullable) { + kbdev->js_data.nr_contexts_pullable++; + ret = true; + if (!atomic_read(&kctx->atoms_pulled)) { + WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF); + atomic_inc(&kbdev->js_data.nr_contexts_runnable); + } + } + kctx->slots_pullable |= (1 << js); + + return ret; +} + +/** + * kbase_js_ctx_list_add_pullable_head - Add context to the head 
of the + * per-slot pullable context queue + * @kbdev: Device pointer + * @kctx: Context to add to queue + * @js: Job slot to use + * + * If the context is on either the pullable or unpullable queues, then it is + * removed before being added to the head. + * + * This function should be used when a context has been scheduled, but no jobs + * can currently be pulled from it. + * + * Return: true if caller should call kbase_backend_ctx_count_changed() + */ +static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js) +{ + bool ret; + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return ret; +} + +/** + * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the + * per-slot unpullable context queue + * @kbdev: Device pointer + * @kctx: Context to add to queue + * @js: Job slot to use + * + * The context must already be on the per-slot pullable queue. It will be + * removed from the pullable queue before being added to the unpullable queue. + * + * This function should be used when a context has been pulled from, and there + * are no jobs remaining on the specified slot. + * + * Caller must hold hwaccess_lock + * + * Return: true if caller should call kbase_backend_ctx_count_changed() + */ +static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js) +{ + bool ret = false; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js], + &kbdev->js_data.ctx_list_unpullable[js]); + + if (kctx->slots_pullable == (1 << js)) { + kbdev->js_data.nr_contexts_pullable--; + ret = true; + if (!atomic_read(&kctx->atoms_pulled)) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF); + atomic_dec(&kbdev->js_data.nr_contexts_runnable); + } + } + kctx->slots_pullable &= ~(1 << js); + + return ret; +} + +/** + * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable + * or unpullable context queues + * @kbdev: Device pointer + * @kctx: Context to remove from queue + * @js: Job slot to use + * + * The context must already be on one of the queues. + * + * This function should be used when a context has no jobs on the GPU, and no + * jobs remaining for the specified slot. 
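The pullable/unpullable helpers above share one bookkeeping rule: nr_contexts_pullable is only adjusted on the transitions of the per-context slots_pullable mask between zero and non-zero, which is why the removal paths test slots_pullable == (1 << js) before clearing the bit (the KCTX_RUNNABLE_REF accounting adds a further atoms_pulled check on top). A condensed stand-alone sketch of that transition rule; the demo_* names are illustrative, not the driver's API:

#include <stdbool.h>

/* Illustrative stand-ins for the per-context mask and the device-wide
 * counter of contexts that currently have pullable work. */
struct demo_ctx {
        unsigned int slots_pullable;    /* bit n set => slot n has pullable work */
};

static int nr_pullable_ctxs;

/* Count the context only when its mask goes from zero to non-zero,
 * mirroring kbase_js_ctx_list_add_pullable_nolock(). */
static bool demo_slot_becomes_pullable(struct demo_ctx *ctx, int js)
{
        bool counter_changed = false;

        if (!ctx->slots_pullable) {
                nr_pullable_ctxs++;
                counter_changed = true;
        }
        ctx->slots_pullable |= 1u << js;
        return counter_changed;
}

/* Un-count the context only when the bit being cleared is the last one
 * set, mirroring the "== (1 << js)" test in the unpullable/remove paths. */
static bool demo_slot_becomes_unpullable(struct demo_ctx *ctx, int js)
{
        bool counter_changed = false;

        if (ctx->slots_pullable == (1u << js)) {
                nr_pullable_ctxs--;
                counter_changed = true;
        }
        ctx->slots_pullable &= ~(1u << js);
        return counter_changed;
}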
+ * + * Caller must hold hwaccess_lock + * + * Return: true if caller should call kbase_backend_ctx_count_changed() + */ +static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx, + int js) +{ + bool ret = false; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js])); + + list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + + if (kctx->slots_pullable == (1 << js)) { + kbdev->js_data.nr_contexts_pullable--; + ret = true; + if (!atomic_read(&kctx->atoms_pulled)) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF); + atomic_dec(&kbdev->js_data.nr_contexts_runnable); + } + } + kctx->slots_pullable &= ~(1 << js); + + return ret; +} + +/** + * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head() + * where the caller must hold + * hwaccess_lock + * @kbdev: Device pointer + * @js: Job slot to use + * + * Caller must hold hwaccess_lock + * + * Return: Context to use for specified slot. + * NULL if no contexts present for specified slot + */ +static struct kbase_context *kbase_js_ctx_list_pop_head_nolock( + struct kbase_device *kbdev, + int js) +{ + struct kbase_context *kctx; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (list_empty(&kbdev->js_data.ctx_list_pullable[js])) + return NULL; + + kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next, + struct kbase_context, + jctx.sched_info.ctx.ctx_list_entry[js]); + + list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + + return kctx; +} + +/** + * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable + * queue. + * @kbdev: Device pointer + * @js: Job slot to use + * + * Return: Context to use for specified slot. 
+ * NULL if no contexts present for specified slot + */ +static struct kbase_context *kbase_js_ctx_list_pop_head( + struct kbase_device *kbdev, int js) +{ + struct kbase_context *kctx; + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return kctx; +} + +/** + * kbase_js_ctx_pullable - Return if a context can be pulled from on the + * specified slot + * @kctx: Context pointer + * @js: Job slot to use + * @is_scheduled: true if the context is currently scheduled + * + * Caller must hold hwaccess_lock + * + * Return: true if context can be pulled from on specified slot + * false otherwise + */ +static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js, + bool is_scheduled) +{ + struct kbasep_js_device_data *js_devdata; + struct kbase_jd_atom *katom; + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + js_devdata = &kctx->kbdev->js_data; + + if (is_scheduled) { + if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) + return false; + } + katom = jsctx_rb_peek(kctx, js); + if (!katom) + return false; /* No pullable atoms */ + if (kctx->blocked_js[js][katom->sched_priority]) + return false; + if (atomic_read(&katom->blocked)) + return false; /* next atom blocked */ + if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) { + if (katom->x_pre_dep->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB || + katom->x_pre_dep->will_fail_event_code) + return false; + if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) && + kbase_backend_nr_atoms_on_slot(kctx->kbdev, js)) + return false; + } + + return true; +} + +static bool kbase_js_dep_validate(struct kbase_context *kctx, + struct kbase_jd_atom *katom) +{ + struct kbase_device *kbdev = kctx->kbdev; + bool ret = true; + bool has_dep = false, has_x_dep = false; + int js = kbase_js_get_slot(kbdev, katom); + int prio = katom->sched_priority; + int i; + + for (i = 0; i < 2; i++) { + struct kbase_jd_atom *dep_atom = katom->dep[i].atom; + + if (dep_atom) { + int dep_js = kbase_js_get_slot(kbdev, dep_atom); + int dep_prio = dep_atom->sched_priority; + + /* Dependent atom must already have been submitted */ + if (!(dep_atom->atom_flags & + KBASE_KATOM_FLAG_JSCTX_IN_TREE)) { + ret = false; + break; + } + + /* Dependencies with different priorities can't + be represented in the ringbuffer */ + if (prio != dep_prio) { + ret = false; + break; + } + + if (js == dep_js) { + /* Only one same-slot dependency can be + * represented in the ringbuffer */ + if (has_dep) { + ret = false; + break; + } + /* Each dependee atom can only have one + * same-slot dependency */ + if (dep_atom->post_dep) { + ret = false; + break; + } + has_dep = true; + } else { + /* Only one cross-slot dependency can be + * represented in the ringbuffer */ + if (has_x_dep) { + ret = false; + break; + } + /* Each dependee atom can only have one + * cross-slot dependency */ + if (dep_atom->x_post_dep) { + ret = false; + break; + } + /* The dependee atom can not already be in the + * HW access ringbuffer */ + if (dep_atom->gpu_rb_state != + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) { + ret = false; + break; + } + /* The dependee atom can not already have + * completed */ + if (dep_atom->status != + KBASE_JD_ATOM_STATE_IN_JS) { + ret = false; + break; + } + /* Cross-slot dependencies must not violate + * PRLAM-8987 affinity restrictions */ + if (kbase_hw_has_issue(kbdev, + BASE_HW_ISSUE_8987) && + (js == 2 || dep_js == 2)) { + ret = false; + break; + } + 
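/*
 * Summary of the checks made so far by kbase_js_dep_validate(): a dependency
 * may only be left to the ringbuffer/runnable-tree representation when the
 * dependee has already been submitted (JSCTX_IN_TREE), both atoms share the
 * same scheduling priority, at most one same-slot and at most one cross-slot
 * dependency exists, the dependee has no other dependent on that edge, and a
 * cross-slot dependee is neither already in the HW access ringbuffer nor
 * already completed, nor does the pair hit the PRLAM-8987 slot-2 restriction.
 * Any other case sets ret = false, and kbasep_js_add_job() then keeps the
 * atom queued with its dependencies unresolved.
 */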
has_x_dep = true; + } + + /* Dependency can be represented in ringbuffers */ + } + } + + /* If dependencies can be represented by ringbuffer then clear them from + * atom structure */ + if (ret) { + for (i = 0; i < 2; i++) { + struct kbase_jd_atom *dep_atom = katom->dep[i].atom; + + if (dep_atom) { + int dep_js = kbase_js_get_slot(kbdev, dep_atom); + + if ((js != dep_js) && + (dep_atom->status != + KBASE_JD_ATOM_STATE_COMPLETED) + && (dep_atom->status != + KBASE_JD_ATOM_STATE_HW_COMPLETED) + && (dep_atom->status != + KBASE_JD_ATOM_STATE_UNUSED)) { + + katom->atom_flags |= + KBASE_KATOM_FLAG_X_DEP_BLOCKED; + katom->x_pre_dep = dep_atom; + dep_atom->x_post_dep = katom; + if (kbase_jd_katom_dep_type( + &katom->dep[i]) == + BASE_JD_DEP_TYPE_DATA) + katom->atom_flags |= + KBASE_KATOM_FLAG_FAIL_BLOCKER; + } + if ((kbase_jd_katom_dep_type(&katom->dep[i]) + == BASE_JD_DEP_TYPE_DATA) && + (js == dep_js)) { + katom->pre_dep = dep_atom; + dep_atom->post_dep = katom; + } + + list_del(&katom->dep_item[i]); + kbase_jd_katom_dep_clear(&katom->dep[i]); + } + } + } + + return ret; +} + +bool kbasep_js_add_job(struct kbase_context *kctx, + struct kbase_jd_atom *atom) +{ + unsigned long flags; + struct kbasep_js_kctx_info *js_kctx_info; + struct kbase_device *kbdev; + struct kbasep_js_device_data *js_devdata; + + bool enqueue_required = false; + bool timer_sync = false; + + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(atom != NULL); + lockdep_assert_held(&kctx->jctx.lock); + + kbdev = kctx->kbdev; + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + + /* + * Begin Runpool transaction + */ + mutex_lock(&js_devdata->runpool_mutex); + + /* Refcount ctx.nr_jobs */ + KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX); + ++(js_kctx_info->ctx.nr_jobs); + + /* Setup any scheduling information */ + kbasep_js_clear_job_retry_submit(atom); + + /* Lock for state available during IRQ */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + if (!kbase_js_dep_validate(kctx, atom)) { + /* Dependencies could not be represented */ + --(js_kctx_info->ctx.nr_jobs); + + /* Setting atom status back to queued as it still has unresolved + * dependencies */ + atom->status = KBASE_JD_ATOM_STATE_QUEUED; + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&js_devdata->runpool_mutex); + + goto out_unlock; + } + + KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_READY); + KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom)); + + enqueue_required = kbase_js_dep_resolved_submit(kctx, atom); + + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc, + kbasep_js_trace_get_refcnt(kbdev, kctx)); + + /* Context Attribute Refcounting */ + kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom); + + if (enqueue_required) { + if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false)) + timer_sync = kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, atom->slot_nr); + else + timer_sync = kbase_js_ctx_list_add_unpullable_nolock( + kbdev, kctx, atom->slot_nr); + } + /* If this context is active and the atom is the first on its slot, + * kick the job manager to attempt to fast-start the atom */ + if (enqueue_required && kctx == kbdev->hwaccess.active_kctx) + kbase_jm_try_kick(kbdev, 1 << atom->slot_nr); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + if (timer_sync) + kbase_backend_ctx_count_changed(kbdev); + mutex_unlock(&js_devdata->runpool_mutex); + /* End runpool 
transaction */ + + if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) { + if (kbase_ctx_flag(kctx, KCTX_DYING)) { + /* A job got added while/after kbase_job_zap_context() + * was called on a non-scheduled context. Kill that job + * by killing the context. */ + kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, + false); + } else if (js_kctx_info->ctx.nr_jobs == 1) { + /* Handle Refcount going from 0 to 1: schedule the + * context on the Queue */ + KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx); + + /* Queue was updated - caller must try to + * schedule the head context */ + WARN_ON(!enqueue_required); + } + } +out_unlock: + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + mutex_unlock(&js_devdata->queue_mutex); + + return enqueue_required; +} + +void kbasep_js_remove_job(struct kbase_device *kbdev, + struct kbase_context *kctx, struct kbase_jd_atom *atom) +{ + struct kbasep_js_kctx_info *js_kctx_info; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(atom != NULL); + + js_kctx_info = &kctx->jctx.sched_info; + + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc, + kbasep_js_trace_get_refcnt(kbdev, kctx)); + + /* De-refcount ctx.nr_jobs */ + KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0); + --(js_kctx_info->ctx.nr_jobs); +} + +bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, + struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + unsigned long flags; + struct kbasep_js_atom_retained_state katom_retained_state; + bool attr_state_changed; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(katom != NULL); + + kbasep_js_atom_retained_state_copy(&katom_retained_state, katom); + kbasep_js_remove_job(kbdev, kctx, katom); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* The atom has 'finished' (will not be re-run), so no need to call + * kbasep_js_has_atom_finished(). 
+ * + * This is because it returns false for soft-stopped atoms, but we + * want to override that, because we're cancelling an atom regardless of + * whether it was soft-stopped or not */ + attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx, + &katom_retained_state); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return attr_state_changed; +} + +bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + unsigned long flags; + bool result; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + return result; +} + +struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, + int as_nr) +{ + unsigned long flags; + struct kbase_context *found_kctx = NULL; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + found_kctx = kbdev->as_to_kctx[as_nr]; + + if (found_kctx != NULL) + kbase_ctx_sched_retain_ctx_refcount(found_kctx); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return found_kctx; +} + +/** + * kbasep_js_release_result - Try running more jobs after releasing a context + * and/or atom + * + * @kbdev: The kbase_device to operate on + * @kctx: The kbase_context to operate on + * @katom_retained_state: Retained state from the atom + * @runpool_ctx_attr_change: True if the runpool context attributes have changed + * + * This collates a set of actions that must happen whilst hwaccess_lock is held. + * + * This includes running more jobs when: + * - The previously released kctx caused a ctx attribute change, + * - The released atom caused a ctx attribute change, + * - Slots were previously blocked due to affinity restrictions, + * - Submission during IRQ handling failed. + * + * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were + * changed. 
The caller should try scheduling all contexts + */ +static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release( + struct kbase_device *kbdev, + struct kbase_context *kctx, + struct kbasep_js_atom_retained_state *katom_retained_state, + bool runpool_ctx_attr_change) +{ + struct kbasep_js_device_data *js_devdata; + kbasep_js_release_result result = 0; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(katom_retained_state != NULL); + js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex); + lockdep_assert_held(&js_devdata->runpool_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (js_devdata->nr_user_contexts_running != 0) { + bool retry_submit = false; + int retry_jobslot = 0; + + if (katom_retained_state) + retry_submit = kbasep_js_get_atom_retry_submit_slot( + katom_retained_state, &retry_jobslot); + + if (runpool_ctx_attr_change || retry_submit) { + /* A change in runpool ctx attributes might mean we can + * run more jobs than before */ + result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL; + + KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB, + kctx, NULL, 0u, retry_jobslot); + } + } + return result; +} + +/* + * Internal function to release the reference on a ctx and an atom's "retained + * state", only taking the runpool and as transaction mutexes + * + * This also starts more jobs running in the case of an ctx-attribute state + * change + * + * This does none of the followup actions for scheduling: + * - It does not schedule in a new context + * - It does not requeue or handle dying contexts + * + * For those tasks, just call kbasep_js_runpool_release_ctx() instead + * + * Requires: + * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr + * - Context has a non-zero refcount + * - Caller holds js_kctx_info->ctx.jsctx_mutex + * - Caller holds js_devdata->runpool_mutex + */ +static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal( + struct kbase_device *kbdev, + struct kbase_context *kctx, + struct kbasep_js_atom_retained_state *katom_retained_state) +{ + unsigned long flags; + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + + kbasep_js_release_result release_result = 0u; + bool runpool_ctx_attr_change = false; + int kctx_as_nr; + int new_ref_count; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + js_kctx_info = &kctx->jctx.sched_info; + js_devdata = &kbdev->js_data; + + /* Ensure context really is scheduled in */ + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + kctx_as_nr = kctx->as_nr; + KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0); + + /* + * Transaction begins on AS and runpool_irq + * + * Assert about out calling contract + */ + mutex_lock(&kbdev->pm.lock); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr); + KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0); + + /* Update refcount */ + kbase_ctx_sched_release_ctx(kctx); + new_ref_count = atomic_read(&kctx->refcount); + + /* Release the atom if it finished (i.e. 
wasn't soft-stopped) */ + if (kbasep_js_has_atom_finished(katom_retained_state)) + runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom( + kbdev, kctx, katom_retained_state); + + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u, + new_ref_count); + + if (new_ref_count == 2 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) && + !kbase_pm_is_suspending(kbdev)) { + /* Context is kept scheduled into an address space even when + * there are no jobs, in this case we have to handle the + * situation where all jobs have been evicted from the GPU and + * submission is disabled. + * + * At this point we re-enable submission to allow further jobs + * to be executed + */ + kbasep_js_set_submit_allowed(js_devdata, kctx); + } + + /* Make a set of checks to see if the context should be scheduled out. + * Note that there'll always be at least 1 reference to the context + * which was previously acquired by kbasep_js_schedule_ctx(). */ + if (new_ref_count == 1 && + (!kbasep_js_is_submit_allowed(js_devdata, kctx) || + kbdev->pm.suspending)) { + int num_slots = kbdev->gpu_props.num_job_slots; + int slot; + + /* Last reference, and we've been told to remove this context + * from the Run Pool */ + dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because refcount=%d, jobs=%d, allowed=%d", + kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, + kbasep_js_is_submit_allowed(js_devdata, kctx)); + +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_mmu_as_released(kctx->as_nr); +#endif + KBASE_TLSTREAM_TL_NRET_AS_CTX(&kbdev->as[kctx->as_nr], kctx); + + kbase_backend_release_ctx_irq(kbdev, kctx); + + if (kbdev->hwaccess.active_kctx == kctx) + kbdev->hwaccess.active_kctx = NULL; + + /* Ctx Attribute handling + * + * Releasing atoms attributes must either happen before this, or + * after the KCTX_SHEDULED flag is changed, otherwise we + * double-decount the attributes + */ + runpool_ctx_attr_change |= + kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx); + + /* Releasing the context and katom retained state can allow + * more jobs to run */ + release_result |= + kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, + kctx, katom_retained_state, + runpool_ctx_attr_change); + + /* + * Transaction ends on AS and runpool_irq: + * + * By this point, the AS-related data is now clear and ready + * for re-use. + * + * Since releases only occur once for each previous successful + * retain, and no more retains are allowed on this context, no + * other thread will be operating in this + * code whilst we are + */ + + /* Recalculate pullable status for all slots */ + for (slot = 0; slot < num_slots; slot++) { + if (kbase_js_ctx_pullable(kctx, slot, false)) + kbase_js_ctx_list_add_pullable_nolock(kbdev, + kctx, slot); + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + kbase_backend_release_ctx_noirq(kbdev, kctx); + + mutex_unlock(&kbdev->pm.lock); + + /* Note: Don't reuse kctx_as_nr now */ + + /* Synchronize with any timers */ + kbase_backend_ctx_count_changed(kbdev); + + /* update book-keeping info */ + kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED); + /* Signal any waiter that the context is not scheduled, so is + * safe for termination - once the jsctx_mutex is also dropped, + * and jobs have finished. 
*/ + wake_up(&js_kctx_info->ctx.is_scheduled_wait); + + /* Queue an action to occur after we've dropped the lock */ + release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED | + KBASEP_JS_RELEASE_RESULT_SCHED_ALL; + } else { + kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx, + katom_retained_state, runpool_ctx_attr_change); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->pm.lock); + } + + return release_result; +} + +void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_atom_retained_state katom_retained_state; + + /* Setup a dummy katom_retained_state */ + kbasep_js_atom_retained_state_init_invalid(&katom_retained_state); + + kbasep_js_runpool_release_ctx_internal(kbdev, kctx, + &katom_retained_state); +} + +void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx, bool has_pm_ref) +{ + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + + /* This is called if and only if you've you've detached the context from + * the Runpool Queue, and not added it back to the Runpool + */ + KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + if (kbase_ctx_flag(kctx, KCTX_DYING)) { + /* Dying: don't requeue, but kill all jobs on the context. This + * happens asynchronously */ + dev_dbg(kbdev->dev, + "JS: ** Killing Context %p on RunPool Remove **", kctx); + kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel); + } +} + +void kbasep_js_runpool_release_ctx_and_katom_retained_state( + struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbasep_js_atom_retained_state *katom_retained_state) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + kbasep_js_release_result release_result; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + js_kctx_info = &kctx->jctx.sched_info; + js_devdata = &kbdev->js_data; + + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + + release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, + katom_retained_state); + + /* Drop the runpool mutex to allow requeing kctx */ + mutex_unlock(&js_devdata->runpool_mutex); + + if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) + kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true); + + /* Drop the jsctx_mutex to allow scheduling in a new context */ + + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + + if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL) + kbase_js_sched_all(kbdev); +} + +void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_atom_retained_state katom_retained_state; + + kbasep_js_atom_retained_state_init_invalid(&katom_retained_state); + + kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, + &katom_retained_state); +} + +/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into + * kbase_js_sched_all() */ +static void kbasep_js_runpool_release_ctx_no_schedule( + struct kbase_device *kbdev, struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + kbasep_js_release_result release_result; + struct kbasep_js_atom_retained_state katom_retained_state_struct; + struct kbasep_js_atom_retained_state *katom_retained_state = + &katom_retained_state_struct; + + 
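The release paths above share one shape: the follow-up work is computed as a set of KBASEP_JS_RELEASE_RESULT_* bits while the runpool/jsctx mutexes and hwaccess_lock are held, and is only acted upon (requeue-or-kill, kbase_js_sched_all) once those locks have been dropped. A stripped-down, stand-alone sketch of the same pattern; the pthread mutex and the demo_* names are placeholders, not driver code:

#include <pthread.h>

/* Follow-up actions decided under the lock, performed after it is dropped. */
#define DEMO_RELEASE_REQUEUE    (1u << 0)
#define DEMO_RELEASE_SCHED_ALL  (1u << 1)

struct demo_pool {
        pthread_mutex_t lock;
        int refcount;
        int submit_allowed;
};

static void demo_requeue_or_kill(struct demo_pool *pool) { (void)pool; }
static void demo_sched_all(struct demo_pool *pool) { (void)pool; }

/* Work out what must happen next while the state is still protected... */
static unsigned int demo_release_locked(struct demo_pool *pool)
{
        unsigned int actions = 0;

        pthread_mutex_lock(&pool->lock);
        if (--pool->refcount == 0 && !pool->submit_allowed)
                actions = DEMO_RELEASE_REQUEUE | DEMO_RELEASE_SCHED_ALL;
        pthread_mutex_unlock(&pool->lock);

        return actions;
}

/* ...then act on the recorded bits only after the lock has been dropped,
 * in the same way the callers of the internal release function defer the
 * requeue-or-kill and reschedule steps until after their mutexes. */
static void demo_release(struct demo_pool *pool)
{
        unsigned int actions = demo_release_locked(pool);

        if (actions & DEMO_RELEASE_REQUEUE)
                demo_requeue_or_kill(pool);
        if (actions & DEMO_RELEASE_SCHED_ALL)
                demo_sched_all(pool);
}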
KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + js_kctx_info = &kctx->jctx.sched_info; + js_devdata = &kbdev->js_data; + kbasep_js_atom_retained_state_init_invalid(katom_retained_state); + + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + + release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx, + katom_retained_state); + + /* Drop the runpool mutex to allow requeing kctx */ + mutex_unlock(&js_devdata->runpool_mutex); + if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u) + kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true); + + /* Drop the jsctx_mutex to allow scheduling in a new context */ + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + /* NOTE: could return release_result if the caller would like to know + * whether it should schedule a new context, but currently no callers do + */ +} + +void kbase_js_set_timeouts(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + kbase_backend_timeouts_changed(kbdev); +} + +static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + unsigned long flags; + bool kctx_suspended = false; + int as_nr; + + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + /* Pick available address space for this context */ + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + as_nr = kbase_ctx_sched_retain_ctx(kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + if (as_nr == KBASEP_AS_NR_INVALID) { + as_nr = kbase_backend_find_and_release_free_address_space( + kbdev, kctx); + if (as_nr != KBASEP_AS_NR_INVALID) { + /* Attempt to retain the context again, this should + * succeed */ + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + as_nr = kbase_ctx_sched_retain_ctx(kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + WARN_ON(as_nr == KBASEP_AS_NR_INVALID); + } + } + if (as_nr == KBASEP_AS_NR_INVALID) + return false; /* No address spaces currently available */ + + /* + * Atomic transaction on the Context and Run Pool begins + */ + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->mmu_hw_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* Check to see if context is dying due to kbase_job_zap_context() */ + if (kbase_ctx_flag(kctx, KCTX_DYING)) { + /* Roll back the transaction so far and return */ + kbase_ctx_sched_release_ctx(kctx); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + return false; + } + + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL, + 0u, + kbasep_js_trace_get_refcnt(kbdev, kctx)); + + kbase_ctx_flag_set(kctx, KCTX_SCHEDULED); + + /* Assign context to previously chosen address space */ + if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) { + /* Roll back the transaction so far and return */ + kbase_ctx_sched_release_ctx(kctx); + kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + return false; + } + 
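kbasep_js_schedule_ctx() above acquires an address space with a try / free-one / retry-once shape, and unwinds the transaction if the context turns out to be dying or the backend rejects it. The acquisition part, reduced to a stand-alone sketch; the demo_* names and the toy pool are illustrative, not the driver's API:

#include <stdbool.h>

#define DEMO_NR_AS      4
#define DEMO_AS_INVALID (-1)

/* Toy address-space pool; in the driver this state sits behind hwaccess_lock. */
static bool demo_as_in_use[DEMO_NR_AS];

static int demo_try_retain_as(void)
{
        int i;

        for (i = 0; i < DEMO_NR_AS; i++) {
                if (!demo_as_in_use[i]) {
                        demo_as_in_use[i] = true;
                        return i;
                }
        }
        return DEMO_AS_INVALID;
}

/* Stand-in for asking the backend to evict an idle holder so that a slot
 * becomes free again (here we just pretend slot 0 can be reclaimed). */
static int demo_evict_one_as(void)
{
        demo_as_in_use[0] = false;
        return 0;
}

/* Same shape as the start of kbasep_js_schedule_ctx(): try once, free an
 * address space on failure, then retry exactly once. */
static int demo_acquire_address_space(void)
{
        int as_nr = demo_try_retain_as();

        if (as_nr == DEMO_AS_INVALID && demo_evict_one_as() != DEMO_AS_INVALID)
                as_nr = demo_try_retain_as();

        return as_nr;   /* DEMO_AS_INVALID if nothing could be reclaimed */
}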
+ kbdev->hwaccess.active_kctx = kctx; + +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_mmu_as_in_use(kctx->as_nr); +#endif + KBASE_TLSTREAM_TL_RET_AS_CTX(&kbdev->as[kctx->as_nr], kctx); + + /* Cause any future waiter-on-termination to wait until the context is + * descheduled */ + wake_up(&js_kctx_info->ctx.is_scheduled_wait); + + /* Re-check for suspending: a suspend could've occurred, and all the + * contexts could've been removed from the runpool before we took this + * lock. In this case, we don't want to allow this context to run jobs, + * we just want it out immediately. + * + * The DMB required to read the suspend flag was issued recently as part + * of the hwaccess_lock locking. If a suspend occurs *after* that lock + * was taken (i.e. this condition doesn't execute), then the + * kbasep_js_suspend() code will cleanup this context instead (by virtue + * of it being called strictly after the suspend flag is set, and will + * wait for this lock to drop) */ + if (kbase_pm_is_suspending(kbdev)) { + /* Cause it to leave at some later point */ + bool retained; + + retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); + KBASE_DEBUG_ASSERT(retained); + + kbasep_js_clear_submit_allowed(js_devdata, kctx); + kctx_suspended = true; + } + + /* Transaction complete */ + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kbdev->mmu_hw_mutex); + + /* Synchronize with any timers */ + kbase_backend_ctx_count_changed(kbdev); + + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + /* Note: after this point, the context could potentially get scheduled + * out immediately */ + + if (kctx_suspended) { + /* Finishing forcing out the context due to a suspend. Use a + * variant of kbasep_js_runpool_release_ctx() that doesn't + * schedule a new context, to prevent a risk of recursion back + * into this function */ + kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx); + return false; + } + return true; +} + +static bool kbase_js_use_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + unsigned long flags; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && + kbase_backend_use_ctx_sched(kbdev, kctx)) { + /* Context already has ASID - mark as active */ + kbdev->hwaccess.active_kctx = kctx; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + return true; /* Context already scheduled */ + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + return kbasep_js_schedule_ctx(kbdev, kctx); +} + +void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_kctx_info *js_kctx_info; + struct kbasep_js_device_data *js_devdata; + bool is_scheduled; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + /* This must never be attempted whilst suspending - i.e. 
it should only + * happen in response to a syscall from a user-space thread */ + BUG_ON(kbase_pm_is_suspending(kbdev)); + + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + + /* Mark the context as privileged */ + kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED); + + is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED); + if (!is_scheduled) { + /* Add the context to the pullable list */ + if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0)) + kbase_js_sync_timers(kbdev); + + /* Fast-starting requires the jsctx_mutex to be dropped, + * because it works on multiple ctxs */ + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + + /* Try to schedule the context in */ + kbase_js_sched_all(kbdev); + + /* Wait for the context to be scheduled in */ + wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait, + kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + } else { + /* Already scheduled in - We need to retain it to keep the + * corresponding address space */ + kbasep_js_runpool_retain_ctx(kbdev, kctx); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + } +} +KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx); + +void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_kctx_info *js_kctx_info; + + KBASE_DEBUG_ASSERT(kctx != NULL); + js_kctx_info = &kctx->jctx.sched_info; + + /* We don't need to use the address space anymore */ + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + + /* Release the context - it will be scheduled out */ + kbasep_js_runpool_release_ctx(kbdev, kctx); + + kbase_js_sched_all(kbdev); +} +KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx); + +void kbasep_js_suspend(struct kbase_device *kbdev) +{ + unsigned long flags; + struct kbasep_js_device_data *js_devdata; + int i; + u16 retained = 0u; + int nr_privileged_ctx = 0; + + KBASE_DEBUG_ASSERT(kbdev); + KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev)); + js_devdata = &kbdev->js_data; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* Prevent all contexts from submitting */ + js_devdata->runpool_irq.submit_allowed = 0; + + /* Retain each of the contexts, so we can cause it to leave even if it + * had no refcount to begin with */ + for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) { + struct kbase_context *kctx = kbdev->as_to_kctx[i]; + + retained = retained << 1; + + if (kctx) { + kbase_ctx_sched_retain_ctx_refcount(kctx); + retained |= 1u; + /* We can only cope with up to 1 privileged context - + * the instrumented context. It'll be suspended by + * disabling instrumentation */ + if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) { + ++nr_privileged_ctx; + WARN_ON(nr_privileged_ctx != 1); + } + } + } + CSTD_UNUSED(nr_privileged_ctx); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + /* De-ref the previous retain to ensure each context gets pulled out + * sometime later. 
*/ + for (i = 0; + i < BASE_MAX_NR_AS; + ++i, retained = retained >> 1) { + struct kbase_context *kctx = kbdev->as_to_kctx[i]; + + if (retained & 1u) + kbasep_js_runpool_release_ctx(kbdev, kctx); + } + + /* Caller must wait for all Power Manager active references to be + * dropped */ +} + +void kbasep_js_resume(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata; + int js; + + KBASE_DEBUG_ASSERT(kbdev); + js_devdata = &kbdev->js_data; + KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); + + mutex_lock(&js_devdata->queue_mutex); + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + struct kbase_context *kctx, *n; + + list_for_each_entry_safe(kctx, n, + &kbdev->js_data.ctx_list_unpullable[js], + jctx.sched_info.ctx.ctx_list_entry[js]) { + struct kbasep_js_kctx_info *js_kctx_info; + unsigned long flags; + bool timer_sync = false; + + js_kctx_info = &kctx->jctx.sched_info; + + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + mutex_lock(&js_devdata->runpool_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) && + kbase_js_ctx_pullable(kctx, js, false)) + timer_sync = + kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, js); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + if (timer_sync) + kbase_backend_ctx_count_changed(kbdev); + mutex_unlock(&js_devdata->runpool_mutex); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + } + } + mutex_unlock(&js_devdata->queue_mutex); + + /* Restart atom processing */ + kbase_js_sched_all(kbdev); + + /* JS Resume complete */ +} + +bool kbase_js_is_atom_valid(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + if ((katom->core_req & BASE_JD_REQ_FS) && + (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | + BASE_JD_REQ_T))) + return false; + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) && + (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) && + (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T))) + return false; + + return true; +} + +static int kbase_js_get_slot(struct kbase_device *kbdev, + struct kbase_jd_atom *katom) +{ + if (katom->core_req & BASE_JD_REQ_FS) + return 0; + + if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) { + if (katom->device_nr == 1 && + kbdev->gpu_props.num_core_groups == 2) + return 2; + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) + return 2; + } + + return 1; +} + +bool kbase_js_dep_resolved_submit(struct kbase_context *kctx, + struct kbase_jd_atom *katom) +{ + bool enqueue_required; + + katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom); + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + lockdep_assert_held(&kctx->jctx.lock); + + /* If slot will transition from unpullable to pullable then add to + * pullable list */ + if (jsctx_rb_none_to_pull(kctx, katom->slot_nr)) { + enqueue_required = true; + } else { + enqueue_required = false; + } + if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) || + (katom->pre_dep && (katom->pre_dep->atom_flags & + KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) { + int prio = katom->sched_priority; + int js = katom->slot_nr; + struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js]; + + list_add_tail(&katom->queue, &queue->x_dep_head); + katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST; + enqueue_required = false; + } else { + /* Check if there are lower priority jobs to soft stop */ + kbase_job_slot_ctx_priority_check_locked(kctx, katom); + + /* Add atom to ring buffer. 
*/
+ jsctx_tree_add(kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ }
+
+ return enqueue_required;
+}
+
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ * runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+ lockdep_assert_held(&katom->kctx->kbdev->hwaccess_lock);
+
+ while (katom) {
+ WARN_ON(!(katom->atom_flags &
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+ if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+ list_del(&katom->queue);
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+ jsctx_tree_add(katom->kctx, katom);
+ katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+ } else {
+ break;
+ }
+
+ katom = katom->post_dep;
+ }
+}
+
+
+/**
+ * kbase_js_evict_deps - Evict dependencies of a failed atom.
+ * @kctx: Context pointer
+ * @katom: Pointer to the atom that has failed.
+ * @js: The job slot the katom was run on.
+ * @prio: Priority of the katom.
+ *
+ * Remove all post dependencies of an atom from the context ringbuffers.
+ *
+ * The original atom's event_code will be propagated to all dependent atoms.
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_js_evict_deps(struct kbase_context *kctx,
+ struct kbase_jd_atom *katom, int js, int prio)
+{
+ struct kbase_jd_atom *x_dep = katom->x_post_dep;
+ struct kbase_jd_atom *next_katom = katom->post_dep;
+
+ lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+ if (next_katom) {
+ KBASE_DEBUG_ASSERT(next_katom->status !=
+ KBASE_JD_ATOM_STATE_HW_COMPLETED);
+ next_katom->will_fail_event_code = katom->event_code;
+
+ }
+
+ /* Has cross slot dependency. */
+ if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+ KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+ /* Remove dependency.*/
+ x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+
+ /* Fail if it had a data dependency.
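(The failure code stored in will_fail_event_code here is consumed later: when the dependent atom itself completes, kbase_js_complete_atom() overwrites its event_code with the stored value, so the failure propagates in completion order.)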
*/ + if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) { + x_dep->will_fail_event_code = katom->event_code; + } + if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST) + kbase_js_move_to_tree(x_dep); + } +} + +struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js) +{ + struct kbase_jd_atom *katom; + struct kbasep_js_device_data *js_devdata; + struct kbase_device *kbdev; + int pulled; + + KBASE_DEBUG_ASSERT(kctx); + + kbdev = kctx->kbdev; + + js_devdata = &kbdev->js_data; + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) + return NULL; + if (kbase_pm_is_suspending(kbdev)) + return NULL; + + katom = jsctx_rb_peek(kctx, js); + if (!katom) + return NULL; + if (kctx->blocked_js[js][katom->sched_priority]) + return NULL; + if (atomic_read(&katom->blocked)) + return NULL; + + /* Due to ordering restrictions when unpulling atoms on failure, we do + * not allow multiple runs of fail-dep atoms from the same context to be + * present on the same slot */ + if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) { + struct kbase_jd_atom *prev_atom = + kbase_backend_inspect_tail(kbdev, js); + + if (prev_atom && prev_atom->kctx != kctx) + return NULL; + } + + if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) { + if (katom->x_pre_dep->gpu_rb_state == + KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB || + katom->x_pre_dep->will_fail_event_code) + return NULL; + if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) && + kbase_backend_nr_atoms_on_slot(kbdev, js)) + return NULL; + } + + kbase_ctx_flag_set(kctx, KCTX_PULLED); + + pulled = atomic_inc_return(&kctx->atoms_pulled); + if (pulled == 1 && !kctx->slots_pullable) { + WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF); + atomic_inc(&kbdev->js_data.nr_contexts_runnable); + } + atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]); + kctx->atoms_pulled_slot_pri[katom->slot_nr][katom->sched_priority]++; + jsctx_rb_pull(kctx, katom); + + kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); + + katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF; + + katom->ticks = 0; + + return katom; +} + + +static void js_return_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, + work); + struct kbase_context *kctx = katom->kctx; + struct kbase_device *kbdev = kctx->kbdev; + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info; + struct kbasep_js_atom_retained_state retained_state; + int js = katom->slot_nr; + int prio = katom->sched_priority; + bool timer_sync = false; + bool context_idle = false; + unsigned long flags; + base_jd_core_req core_req = katom->core_req; + u64 affinity = katom->affinity; + enum kbase_atom_coreref_state coreref_state = katom->coreref_state; + + KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(katom); + + kbase_backend_complete_wq(kbdev, katom); + + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) + kbase_as_poking_timer_release_atom(kbdev, kctx, katom); + + kbasep_js_atom_retained_state_copy(&retained_state, katom); + + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + + atomic_dec(&kctx->atoms_pulled); + atomic_dec(&kctx->atoms_pulled_slot[js]); + + atomic_dec(&katom->blocked); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kctx->atoms_pulled_slot_pri[js][katom->sched_priority]--; + + if (!atomic_read(&kctx->atoms_pulled_slot[js]) && + 
jsctx_rb_none_to_pull(kctx, js)) + timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js); + + /* If this slot has been blocked due to soft-stopped atoms, and all + * atoms have now been processed, then unblock the slot */ + if (!kctx->atoms_pulled_slot_pri[js][prio] && + kctx->blocked_js[js][prio]) { + kctx->blocked_js[js][prio] = false; + + /* Only mark the slot as pullable if the context is not idle - + * that case is handled below */ + if (atomic_read(&kctx->atoms_pulled) && + kbase_js_ctx_pullable(kctx, js, true)) + timer_sync |= kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, js); + } + + if (!atomic_read(&kctx->atoms_pulled)) { + if (!kctx->slots_pullable) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF); + atomic_dec(&kbdev->js_data.nr_contexts_runnable); + timer_sync = true; + } + + if (kctx->as_nr != KBASEP_AS_NR_INVALID && + !kbase_ctx_flag(kctx, KCTX_DYING)) { + int num_slots = kbdev->gpu_props.num_job_slots; + int slot; + + if (!kbasep_js_is_submit_allowed(js_devdata, kctx)) + kbasep_js_set_submit_allowed(js_devdata, kctx); + + for (slot = 0; slot < num_slots; slot++) { + if (kbase_js_ctx_pullable(kctx, slot, true)) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, slot); + } + } + + kbase_jm_idle_ctx(kbdev, kctx); + + context_idle = true; + } + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + if (context_idle) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE)); + kbase_ctx_flag_clear(kctx, KCTX_ACTIVE); + kbase_pm_context_idle(kbdev); + } + + if (timer_sync) + kbase_js_sync_timers(kbdev); + + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + + katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF; + kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, + &retained_state); + + kbase_js_sched_all(kbdev); + + kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity, + coreref_state); +} + +void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + jsctx_rb_unpull(kctx, katom); + + WARN_ON(work_pending(&katom->work)); + + /* Block re-submission until workqueue has run */ + atomic_inc(&katom->blocked); + + kbase_job_check_leave_disjoint(kctx->kbdev, katom); + + INIT_WORK(&katom->work, js_return_worker); + queue_work(kctx->jctx.job_done_wq, &katom->work); +} + +bool kbase_js_complete_atom_wq(struct kbase_context *kctx, + struct kbase_jd_atom *katom) +{ + struct kbasep_js_kctx_info *js_kctx_info; + struct kbasep_js_device_data *js_devdata; + struct kbase_device *kbdev; + unsigned long flags; + bool timer_sync = false; + int atom_slot; + bool context_idle = false; + int prio = katom->sched_priority; + + kbdev = kctx->kbdev; + atom_slot = katom->slot_nr; + + js_kctx_info = &kctx->jctx.sched_info; + js_devdata = &kbdev->js_data; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + + mutex_lock(&js_devdata->runpool_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) { + context_idle = !atomic_dec_return(&kctx->atoms_pulled); + atomic_dec(&kctx->atoms_pulled_slot[atom_slot]); + kctx->atoms_pulled_slot_pri[atom_slot][prio]--; + + if (!atomic_read(&kctx->atoms_pulled) && + !kctx->slots_pullable) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)); + kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF); + atomic_dec(&kbdev->js_data.nr_contexts_runnable); + timer_sync = true; + } + + /* If 
this slot has been blocked due to soft-stopped atoms, and + * all atoms have now been processed, then unblock the slot */ + if (!kctx->atoms_pulled_slot_pri[atom_slot][prio] + && kctx->blocked_js[atom_slot][prio]) { + kctx->blocked_js[atom_slot][prio] = false; + if (kbase_js_ctx_pullable(kctx, atom_slot, true)) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, atom_slot); + } + } + WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE)); + + if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) && + jsctx_rb_none_to_pull(kctx, atom_slot)) { + if (!list_empty( + &kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot])) + timer_sync |= kbase_js_ctx_list_remove_nolock( + kctx->kbdev, kctx, atom_slot); + } + + /* + * If submission is disabled on this context (most likely due to an + * atom failure) and there are now no atoms left in the system then + * re-enable submission so that context can be scheduled again. + */ + if (!kbasep_js_is_submit_allowed(js_devdata, kctx) && + !atomic_read(&kctx->atoms_pulled) && + !kbase_ctx_flag(kctx, KCTX_DYING)) { + int js; + + kbasep_js_set_submit_allowed(js_devdata, kctx); + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + if (kbase_js_ctx_pullable(kctx, js, true)) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, js); + } + } else if (katom->x_post_dep && + kbasep_js_is_submit_allowed(js_devdata, kctx)) { + int js; + + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + if (kbase_js_ctx_pullable(kctx, js, true)) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kbdev, kctx, js); + } + } + + /* Mark context as inactive. The pm reference will be dropped later in + * jd_done_worker(). + */ + if (context_idle) + kbase_ctx_flag_clear(kctx, KCTX_ACTIVE); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + if (timer_sync) + kbase_backend_ctx_count_changed(kbdev); + mutex_unlock(&js_devdata->runpool_mutex); + + return context_idle; +} + +struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom, + ktime_t *end_timestamp) +{ + struct kbase_device *kbdev; + struct kbase_context *kctx = katom->kctx; + struct kbase_jd_atom *x_dep = katom->x_post_dep; + + kbdev = kctx->kbdev; + + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + if (katom->will_fail_event_code) + katom->event_code = katom->will_fail_event_code; + + katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED; + + if (katom->event_code != BASE_JD_EVENT_DONE) { + kbase_js_evict_deps(kctx, katom, katom->slot_nr, + katom->sched_priority); + } + +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP, + katom->slot_nr), NULL, 0); +#endif + + kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0); + + /* Unblock cross dependency if present */ + if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE || + !(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) && + (x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) { + bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr, + false); + x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED; + kbase_js_move_to_tree(x_dep); + if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr, + false)) + kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx, + x_dep->slot_nr); + + if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) + return x_dep; + } + + return NULL; +} + +void kbase_js_sched(struct kbase_device *kbdev, int js_mask) +{ + struct kbasep_js_device_data *js_devdata; + struct kbase_context 
*last_active; + bool timer_sync = false; + bool ctx_waiting = false; + + js_devdata = &kbdev->js_data; + + down(&js_devdata->schedule_sem); + mutex_lock(&js_devdata->queue_mutex); + + last_active = kbdev->hwaccess.active_kctx; + + while (js_mask) { + int js; + + js = ffs(js_mask) - 1; + + while (1) { + struct kbase_context *kctx; + unsigned long flags; + bool context_idle = false; + + kctx = kbase_js_ctx_list_pop_head(kbdev, js); + + if (!kctx) { + js_mask &= ~(1 << js); + break; /* No contexts on pullable list */ + } + + if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) { + context_idle = true; + + if (kbase_pm_context_active_handle_suspend( + kbdev, + KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) { + /* Suspend pending - return context to + * queue and stop scheduling */ + mutex_lock( + &kctx->jctx.sched_info.ctx.jsctx_mutex); + if (kbase_js_ctx_list_add_pullable_head( + kctx->kbdev, kctx, js)) + kbase_js_sync_timers(kbdev); + mutex_unlock( + &kctx->jctx.sched_info.ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + up(&js_devdata->schedule_sem); + return; + } + kbase_ctx_flag_set(kctx, KCTX_ACTIVE); + } + + if (!kbase_js_use_ctx(kbdev, kctx)) { + mutex_lock( + &kctx->jctx.sched_info.ctx.jsctx_mutex); + /* Context can not be used at this time */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + if (kbase_js_ctx_pullable(kctx, js, false) + || kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) + timer_sync |= + kbase_js_ctx_list_add_pullable_head_nolock( + kctx->kbdev, kctx, js); + else + timer_sync |= + kbase_js_ctx_list_add_unpullable_nolock( + kctx->kbdev, kctx, js); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, + flags); + mutex_unlock( + &kctx->jctx.sched_info.ctx.jsctx_mutex); + if (context_idle) { + WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE)); + kbase_ctx_flag_clear(kctx, KCTX_ACTIVE); + kbase_pm_context_idle(kbdev); + } + + /* No more jobs can be submitted on this slot */ + js_mask &= ~(1 << js); + break; + } + mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kbase_ctx_flag_clear(kctx, KCTX_PULLED); + + if (!kbase_jm_kick(kbdev, 1 << js)) + /* No more jobs can be submitted on this slot */ + js_mask &= ~(1 << js); + + if (!kbase_ctx_flag(kctx, KCTX_PULLED)) { + bool pullable = kbase_js_ctx_pullable(kctx, js, + true); + + /* Failed to pull jobs - push to head of list. + * Unless this context is already 'active', in + * which case it's effectively already scheduled + * so push it to the back of the list. */ + if (pullable && kctx == last_active) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kctx->kbdev, + kctx, js); + else if (pullable) + timer_sync |= + kbase_js_ctx_list_add_pullable_head_nolock( + kctx->kbdev, + kctx, js); + else + timer_sync |= + kbase_js_ctx_list_add_unpullable_nolock( + kctx->kbdev, + kctx, js); + + /* If this context is not the active context, + * but the active context is pullable on this + * slot, then we need to remove the active + * marker to prevent it from submitting atoms in + * the IRQ handler, which would prevent this + * context from making progress. 
*/ + if (last_active && kctx != last_active && + kbase_js_ctx_pullable( + last_active, js, true)) + ctx_waiting = true; + + if (context_idle) { + kbase_jm_idle_ctx(kbdev, kctx); + spin_unlock_irqrestore( + &kbdev->hwaccess_lock, + flags); + WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE)); + kbase_ctx_flag_clear(kctx, KCTX_ACTIVE); + kbase_pm_context_idle(kbdev); + } else { + spin_unlock_irqrestore( + &kbdev->hwaccess_lock, + flags); + } + mutex_unlock( + &kctx->jctx.sched_info.ctx.jsctx_mutex); + + js_mask &= ~(1 << js); + break; /* Could not run atoms on this slot */ + } + + /* Push to back of list */ + if (kbase_js_ctx_pullable(kctx, js, true)) + timer_sync |= + kbase_js_ctx_list_add_pullable_nolock( + kctx->kbdev, kctx, js); + else + timer_sync |= + kbase_js_ctx_list_add_unpullable_nolock( + kctx->kbdev, kctx, js); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex); + } + } + + if (timer_sync) + kbase_js_sync_timers(kbdev); + + if (kbdev->hwaccess.active_kctx == last_active && ctx_waiting) + kbdev->hwaccess.active_kctx = NULL; + + mutex_unlock(&js_devdata->queue_mutex); + up(&js_devdata->schedule_sem); +} + +void kbase_js_zap_context(struct kbase_context *kctx) +{ + struct kbase_device *kbdev = kctx->kbdev; + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info; + int js; + + /* + * Critical assumption: No more submission is possible outside of the + * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs) + * whilst the struct kbase_context is terminating. + */ + + /* First, atomically do the following: + * - mark the context as dying + * - try to evict it from the queue */ + mutex_lock(&kctx->jctx.lock); + mutex_lock(&js_devdata->queue_mutex); + mutex_lock(&js_kctx_info->ctx.jsctx_mutex); + kbase_ctx_flag_set(kctx, KCTX_DYING); + + dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx); + + /* + * At this point we know: + * - If eviction succeeded, it was in the queue, but now no + * longer is + * - We must cancel the jobs here. No Power Manager active reference to + * release. + * - This happens asynchronously - kbase_jd_zap_context() will wait for + * those jobs to be killed. + * - If eviction failed, then it wasn't in the queue. It is one + * of the following: + * - a. it didn't have any jobs, and so is not in the Queue or + * the Run Pool (not scheduled) + * - Hence, no more work required to cancel jobs. No Power Manager + * active reference to release. + * - b. it was in the middle of a scheduling transaction (and thus must + * have at least 1 job). This can happen from a syscall or a + * kernel thread. We still hold the jsctx_mutex, and so the thread + * must be waiting inside kbasep_js_try_schedule_head_ctx(), + * before checking whether the runpool is full. That thread will + * continue after we drop the mutex, and will notice the context + * is dying. It will rollback the transaction, killing all jobs at + * the same time. kbase_jd_zap_context() will wait for those jobs + * to be killed. + * - Hence, no more work required to cancel jobs, or to release the + * Power Manager active reference. + * - c. it is scheduled, and may or may not be running jobs + * - We must cause it to leave the runpool by stopping it from + * submitting any more jobs. 
When it finally does leave, + * kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining jobs + * (because it is dying), release the Power Manager active reference, + * and will not requeue the context in the queue. + * kbase_jd_zap_context() will wait for those jobs to be killed. + * - Hence, work required just to make it leave the runpool. Cancelling + * jobs and releasing the Power manager active reference will be + * handled when it leaves the runpool. + */ + if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) { + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) { + if (!list_empty( + &kctx->jctx.sched_info.ctx.ctx_list_entry[js])) + list_del_init( + &kctx->jctx.sched_info.ctx.ctx_list_entry[js]); + } + + /* The following events require us to kill off remaining jobs + * and update PM book-keeping: + * - we evicted it correctly (it must have jobs to be in the + * Queue) + * + * These events need no action, but take this path anyway: + * - Case a: it didn't have any jobs, and was never in the Queue + * - Case b: scheduling transaction will be partially rolled- + * back (this already cancels the jobs) + */ + + KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, + kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx); + + /* Only cancel jobs when we evicted from the + * queue. No Power Manager active reference was held. + * + * Having is_dying set ensures that this kills, and + * doesn't requeue */ + kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false); + + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + mutex_unlock(&kctx->jctx.lock); + } else { + unsigned long flags; + bool was_retained; + + /* Case c: didn't evict, but it is scheduled - it's in the Run + * Pool */ + KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, + kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx); + + /* Disable the ctx from submitting any more jobs */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + kbasep_js_clear_submit_allowed(js_devdata, kctx); + + /* Retain and (later) release the context whilst it is is now + * disallowed from submitting jobs - ensures that someone + * somewhere will be removing the context later on */ + was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx); + + /* Since it's scheduled and we have the jsctx_mutex, it must be + * retained successfully */ + KBASE_DEBUG_ASSERT(was_retained); + + dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx); + + /* Cancel any remaining running jobs for this kctx - if any. + * Submit is disallowed which takes effect immediately, so no + * more new jobs will appear after we do this. */ + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) + kbase_job_slot_hardstop(kctx, js, NULL); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + mutex_unlock(&js_kctx_info->ctx.jsctx_mutex); + mutex_unlock(&js_devdata->queue_mutex); + mutex_unlock(&kctx->jctx.lock); + + dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", + kctx); + + kbasep_js_runpool_release_ctx(kbdev, kctx); + } + + KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u); + + /* After this, you must wait on both the + * kbase_jd_context::zero_jobs_wait and the + * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs + * to be destroyed, and the context to be de-scheduled (if it was on the + * runpool). + * + * kbase_jd_zap_context() will do this. 
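+ *
+ * Conceptually, that wait amounts to something like the following sketch
+ * (the predicates shown are illustrative only; kbase_jd_zap_context()
+ * implements the real version):
+ *
+ *   wait_event(kctx->jctx.zero_jobs_wait, all_jobs_destroyed(kctx));
+ *   wait_event(kctx->jctx.sched_info.ctx.is_scheduled_waitq,
+ *              !kbase_ctx_flag(kctx, KCTX_SCHEDULED));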
*/ +} + +static inline int trace_get_refcnt(struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + return atomic_read(&kctx->refcount); +} + +/** + * kbase_js_foreach_ctx_job(): - Call a function on all jobs in context + * @kctx: Pointer to context. + * @callback: Pointer to function to call for each job. + * + * Call a function on all jobs belonging to a non-queued, non-running + * context, and detach the jobs from the context as it goes. + * + * Due to the locks that might be held at the time of the call, the callback + * may need to defer work on a workqueue to complete its actions (e.g. when + * cancelling jobs) + * + * Atoms will be removed from the queue, so this must only be called when + * cancelling jobs (which occurs as part of context destruction). + * + * The locking conditions on the caller are as follows: + * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex. + */ +static void kbase_js_foreach_ctx_job(struct kbase_context *kctx, + kbasep_js_ctx_job_cb callback) +{ + struct kbase_device *kbdev; + unsigned long flags; + u32 js; + + kbdev = kctx->kbdev; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL, + 0u, trace_get_refcnt(kbdev, kctx)); + + /* Invoke callback on jobs on each slot in turn */ + for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) + jsctx_queue_foreach(kctx, js, callback); + + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.h b/drivers/gpu/arm/midgard/mali_kbase_js.h new file mode 100644 index 00000000000000..ddada8e468a19b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_js.h @@ -0,0 +1,925 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_js.h + * Job Scheduler APIs. + */ + +#ifndef _KBASE_JS_H_ +#define _KBASE_JS_H_ + +#include "mali_kbase_js_defs.h" +#include "mali_kbase_context.h" +#include "mali_kbase_defs.h" +#include "mali_kbase_debug.h" + +#include "mali_kbase_js_ctx_attr.h" + +/** + * @addtogroup base_api + * @{ + */ + +/** + * @addtogroup base_kbase_api + * @{ + */ + +/** + * @addtogroup kbase_js Job Scheduler Internal APIs + * @{ + * + * These APIs are Internal to KBase. + */ + +/** + * @brief Initialize the Job Scheduler + * + * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero + * initialized before passing to the kbasep_js_devdata_init() function. This is + * to give efficient error path code. + */ +int kbasep_js_devdata_init(struct kbase_device * const kbdev); + +/** + * @brief Halt the Job Scheduler. + * + * It is safe to call this on \a kbdev even if it the kbasep_js_device_data + * sub-structure was never initialized/failed initialization, to give efficient + * error-path code. + * + * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must + * be zero initialized before passing to the kbasep_js_devdata_init() + * function. This is to give efficient error path code. 
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a struct kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The struct kbase_context must be zero initialized before passing to the
+ * kbase_js_init() function. This is to give efficient error path code.
+ */
+int kbasep_js_kctx_init(struct kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a struct kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the struct kbase_context must be zero initialized before passing
+ * to the kbase_js_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(struct kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the context is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. And so, the caller is
+ * guaranteed not to need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is false.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally) + * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be + * obtained internally) + * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally). + * + * @return true indicates that the Policy Queue was updated, and so the + * caller will need to try scheduling a context onto the Run Pool. + * @return false indicates that no updates were made to the Policy Queue, + * so no further action is required from the caller. This is \b always returned + * when the context is currently scheduled. + */ +bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom); + +/** + * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'. + * + * Completely removing a job requires several calls: + * - kbasep_js_copy_atom_retained_state(), to capture the 'retained state' of + * the atom + * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler + * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the + * remaining state held as part of the job having been run. + * + * In the common case of atoms completing normally, this set of actions is more optimal for spinlock purposes than having kbasep_js_remove_job() handle all of the actions. + * + * In the case of cancelling atoms, it is easier to call kbasep_js_remove_cancelled_job(), which handles all the necessary actions. + * + * It is a programming error to call this when: + * - \a atom is not a job belonging to kctx. + * - \a atom has already been removed from the Job Scheduler. + * - \a atom is still in the runpool + * + * Do not use this for removing jobs being killed by kbase_jd_cancel() - use + * kbasep_js_remove_cancelled_job() instead. + * + * The following locking conditions are made on the caller: + * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex. + * + */ +void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom); + +/** + * @brief Completely remove a job chain from the Job Scheduler, in the case + * where the job chain was cancelled. + * + * This is a variant of kbasep_js_remove_job() that takes care of removing all + * of the retained state too. This is generally useful for cancelled atoms, + * which need not be handled in an optimal way. + * + * It is a programming error to call this when: + * - \a atom is not a job belonging to kctx. + * - \a atom has already been removed from the Job Scheduler. + * - \a atom is still in the runpool: + * - it is not being killed with kbasep_jd_cancel() + * + * The following locking conditions are made on the caller: + * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex. + * - it must \em not hold the hwaccess_lock, (as this will be obtained + * internally) + * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be + * obtained internally) + * + * @return true indicates that ctx attributes have changed and the caller + * should call kbase_js_sched_all() to try to run more jobs + * @return false otherwise + */ +bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, + struct kbase_context *kctx, + struct kbase_jd_atom *katom); + +/** + * @brief Refcount a context as being busy, preventing it from being scheduled + * out. + * + * @note This function can safely be called from IRQ context. 
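+ *
+ * A minimal usage sketch (error handling elided; the matching release must
+ * not be made from IRQ context, see kbasep_js_runpool_release_ctx()):
+ *
+ *   if (kbasep_js_runpool_retain_ctx(kbdev, kctx)) {
+ *           /* kctx is guaranteed to stay scheduled in here */
+ *           do_something_with(kctx); /* hypothetical work */
+ *           kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *   }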
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
+ * used internally.
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - mmu_hw_mutex, hwaccess_lock
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Lookup a context in the Run Pool based upon its current address space
+ * and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * If the hwaccess_lock is already held, then the caller should use
+ * kbasep_js_runpool_lookup_ctx_nolock() instead.
+ *
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
+
+/**
+ * @brief Handling the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true. \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or if it has run
+ * out of jobs).
+ * + * If the context does get scheduled out, then The following actions will be + * taken as part of deschduling a context: + * - For the context being descheduled: + * - If the context is in the processing of dying (all the jobs are being + * removed from it), then descheduling also kills off any jobs remaining in the + * context. + * - If the context is not dying, and any jobs remain after descheduling the + * context then it is re-enqueued to the Policy's Queue. + * - Otherwise, the context is still known to the scheduler, but remains absent + * from the Policy Queue until a job is next added to it. + * - In all descheduling cases, the Power Manager active reference (obtained + * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()). + * + * Whilst the context is being descheduled, this also handles actions that + * cause more atoms to be run: + * - Attempt submitting atoms when the Context Attributes on the Runpool have + * changed. This is because the context being scheduled out could mean that + * there are more opportunities to run atoms. + * - Attempt submitting to a slot that was previously blocked due to affinity + * restrictions. This is usually only necessary when releasing a context + * happens as part of completing a previous job, but is harmless nonetheless. + * - Attempt scheduling in a new context (if one is available), and if necessary, + * running a job from that new context. + * + * Unlike retaining a context in the runpool, this function \b cannot be called + * from IRQ context. + * + * It is a programming error to call this on a \a kctx that is not currently + * scheduled, or that already has a zero refcount. + * + * The following locking conditions are made on the caller: + * - it must \em not hold the hwaccess_lock, because it will be used internally. + * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex. + * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be + * obtained internally) + * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be + * obtained internally) + * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be + * obtained internally) + * + */ +void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional + * actions from completing an atom. + * + * This is usually called as part of completing an atom and releasing the + * refcount on the context held by the atom. + * + * Therefore, the extra actions carried out are part of handling actions queued + * on a completed atom, namely: + * - Releasing the atom's context attributes + * - Retrying the submission on a particular slot, because we couldn't submit + * on that slot from an IRQ handler. + * + * The locking conditions of this function are the same as those for + * kbasep_js_runpool_release_ctx() + */ +void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state); + +/** + * @brief Variant of kbase_js_runpool_release_ctx() that assumes that + * kbasep_js_device_data::runpool_mutex and + * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not + * attempt to schedule new contexts. 
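+ *
+ * For illustration, a caller that already holds the required mutexes (in the
+ * jsctx_mutex -> runpool_mutex order used elsewhere in this file) might do:
+ *
+ *   mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ *   mutex_lock(&kbdev->js_data.runpool_mutex);
+ *   kbasep_js_runpool_release_ctx_nolock(kbdev, kctx);
+ *   mutex_unlock(&kbdev->js_data.runpool_mutex);
+ *   mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ *
+ * This is a sketch only; since this variant does not attempt to schedule new
+ * contexts, the caller remains responsible for any subsequent scheduling.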
+ */ +void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev, + struct kbase_context *kctx); + +/** + * @brief Schedule in a privileged context + * + * This schedules a context in regardless of the context priority. + * If the runpool is full, a context will be forced out of the runpool and the function will wait + * for the new context to be scheduled in. + * The context will be kept scheduled in (and the corresponding address space reserved) until + * kbasep_js_release_privileged_ctx is called). + * + * The following locking conditions are made on the caller: + * - it must \em not hold the hwaccess_lock, because it will be used internally. + * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be + * obtained internally) + * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be + * obtained internally) + * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally). + * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will + * be used internally. + * + */ +void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * @brief Release a privileged context, allowing it to be scheduled out. + * + * See kbasep_js_runpool_release_ctx for potential side effects. + * + * The following locking conditions are made on the caller: + * - it must \em not hold the hwaccess_lock, because it will be used internally. + * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex. + * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be + * obtained internally) + * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be + * obtained internally) + * + */ +void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * @brief Try to submit the next job on each slot + * + * The following locks may be used: + * - kbasep_js_device_data::runpool_mutex + * - hwaccess_lock + */ +void kbase_js_try_run_jobs(struct kbase_device *kbdev); + +/** + * @brief Suspend the job scheduler during a Power Management Suspend event. + * + * Causes all contexts to be removed from the runpool, and prevents any + * contexts from (re)entering the runpool. + * + * This does not handle suspending the one privileged context: the caller must + * instead do this by by suspending the GPU HW Counter Instrumentation. + * + * This will eventually cause all Power Management active references held by + * contexts on the runpool to be released, without running any more atoms. + * + * The caller must then wait for all Power Mangement active refcount to become + * zero before completing the suspend. + * + * The emptying mechanism may take some time to complete, since it can wait for + * jobs to complete naturally instead of forcing them to end quickly. However, + * this is bounded by the Job Scheduler's Job Timeouts. Hence, this + * function is guaranteed to complete in a finite time. + */ +void kbasep_js_suspend(struct kbase_device *kbdev); + +/** + * @brief Resume the Job Scheduler after a Power Management Resume event. + * + * This restores the actions from kbasep_js_suspend(): + * - Schedules contexts back into the runpool + * - Resumes running atoms on the GPU + */ +void kbasep_js_resume(struct kbase_device *kbdev); + +/** + * @brief Submit an atom to the job scheduler. + * + * The atom is enqueued on the context's ringbuffer. 
The caller must have + * ensured that all dependencies can be represented in the ringbuffer. + * + * Caller must hold jctx->lock + * + * @param[in] kctx Context pointer + * @param[in] atom Pointer to the atom to submit + * + * @return Whether the context requires to be enqueued. */ +bool kbase_js_dep_resolved_submit(struct kbase_context *kctx, + struct kbase_jd_atom *katom); + +/** + * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer. + * @kctx: Context Pointer + * @prio: Priority (specifies the queue together with js). + * @js: Job slot (specifies the queue together with prio). + * + * Pushes all possible atoms from the linked list to the ringbuffer. + * Number of atoms are limited to free space in the ringbuffer and + * number of available atoms in the linked list. + * + */ +void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js); +/** + * @brief Pull an atom from a context in the job scheduler for execution. + * + * The atom will not be removed from the ringbuffer at this stage. + * + * The HW access lock must be held when calling this function. + * + * @param[in] kctx Context to pull from + * @param[in] js Job slot to pull from + * @return Pointer to an atom, or NULL if there are no atoms for this + * slot that can be currently run. + */ +struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js); + +/** + * @brief Return an atom to the job scheduler ringbuffer. + * + * An atom is 'unpulled' if execution is stopped but intended to be returned to + * later. The most common reason for this is that the atom has been + * soft-stopped. + * + * Note that if multiple atoms are to be 'unpulled', they must be returned in + * the reverse order to which they were originally pulled. It is a programming + * error to return atoms in any other order. + * + * The HW access lock must be held when calling this function. + * + * @param[in] kctx Context pointer + * @param[in] atom Pointer to the atom to unpull + */ +void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom); + +/** + * @brief Complete an atom from jd_done_worker(), removing it from the job + * scheduler ringbuffer. + * + * If the atom failed then all dependee atoms marked for failure propagation + * will also fail. + * + * @param[in] kctx Context pointer + * @param[in] katom Pointer to the atom to complete + * @return true if the context is now idle (no jobs pulled) + * false otherwise + */ +bool kbase_js_complete_atom_wq(struct kbase_context *kctx, + struct kbase_jd_atom *katom); + +/** + * @brief Complete an atom. + * + * Most of the work required to complete an atom will be performed by + * jd_done_worker(). + * + * The HW access lock must be held when calling this function. + * + * @param[in] katom Pointer to the atom to complete + * @param[in] end_timestamp The time that the atom completed (may be NULL) + * + * Return: Atom that has now been unblocked and can now be run, or NULL if none + */ +struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom, + ktime_t *end_timestamp); + +/** + * @brief Submit atoms from all available contexts. + * + * This will attempt to submit as many jobs as possible to the provided job + * slots. It will exit when either all job slots are full, or all contexts have + * been used. 
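+ *
+ * For example (illustrative only), a caller can target a single slot or all
+ * slots via the mask:
+ *
+ *   kbase_js_sched(kbdev, 1 << 2); /* try to submit on job slot 2 only */
+ *   kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+ *   /* every slot, which is what kbase_js_sched_all() does */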
+ * + * @param[in] kbdev Device pointer + * @param[in] js_mask Mask of job slots to submit to + */ +void kbase_js_sched(struct kbase_device *kbdev, int js_mask); + +/** + * kbase_jd_zap_context - Attempt to deschedule a context that is being + * destroyed + * @kctx: Context pointer + * + * This will attempt to remove a context from any internal job scheduler queues + * and perform any other actions to ensure a context will not be submitted + * from. + * + * If the context is currently scheduled, then the caller must wait for all + * pending jobs to complete before taking any further action. + */ +void kbase_js_zap_context(struct kbase_context *kctx); + +/** + * @brief Validate an atom + * + * This will determine whether the atom can be scheduled onto the GPU. Atoms + * with invalid combinations of core requirements will be rejected. + * + * @param[in] kbdev Device pointer + * @param[in] katom Atom to validate + * @return true if atom is valid + * false otherwise + */ +bool kbase_js_is_atom_valid(struct kbase_device *kbdev, + struct kbase_jd_atom *katom); + +/** + * kbase_js_set_timeouts - update all JS timeouts with user specified data + * @kbdev: Device pointer + * + * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is + * set to a positive number then that becomes the new value used, if a timeout + * is negative then the default is set. + */ +void kbase_js_set_timeouts(struct kbase_device *kbdev); + +/* + * Helpers follow + */ + +/** + * @brief Check that a context is allowed to submit jobs on this policy + * + * The purpose of this abstraction is to hide the underlying data size, and wrap up + * the long repeated line of code. + * + * As with any bool, never test the return value with true. + * + * The caller must hold hwaccess_lock. + */ +static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) +{ + u16 test_bit; + + /* Ensure context really is scheduled in */ + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + test_bit = (u16) (1u << kctx->as_nr); + + return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit); +} + +/** + * @brief Allow a context to submit jobs on this policy + * + * The purpose of this abstraction is to hide the underlying data size, and wrap up + * the long repeated line of code. + * + * The caller must hold hwaccess_lock. + */ +static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) +{ + u16 set_bit; + + /* Ensure context really is scheduled in */ + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + set_bit = (u16) (1u << kctx->as_nr); + + dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr); + + js_devdata->runpool_irq.submit_allowed |= set_bit; +} + +/** + * @brief Prevent a context from submitting more jobs on this policy + * + * The purpose of this abstraction is to hide the underlying data size, and wrap up + * the long repeated line of code. + * + * The caller must hold hwaccess_lock. 
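+ *
+ * A typical pattern in this file (a sketch, under hwaccess_lock) is to disable
+ * submission on a failure and re-enable it once no more atoms remain pulled:
+ *
+ *   kbasep_js_clear_submit_allowed(js_devdata, kctx);
+ *   /* ... later, when the failure has been handled ... */
+ *   if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
+ *       !atomic_read(&kctx->atoms_pulled))
+ *           kbasep_js_set_submit_allowed(js_devdata, kctx);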
+ */ +static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx) +{ + u16 clear_bit; + u16 clear_mask; + + /* Ensure context really is scheduled in */ + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + clear_bit = (u16) (1u << kctx->as_nr); + clear_mask = ~clear_bit; + + dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr); + + js_devdata->runpool_irq.submit_allowed &= clear_mask; +} + +/** + * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom + */ +static inline void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom) +{ + atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID; +} + +/** + * Mark a slot as requiring resubmission by carrying that information on a + * completing atom. + * + * @note This can ASSERT in debug builds if the submit slot has been set to + * something other than the current value for @a js. This is because you might + * be unintentionally stopping more jobs being submitted on the old submit + * slot, and that might cause a scheduling-hang. + * + * @note If you can guarantee that the atoms for the original slot will be + * submitted on some other slot, then call kbasep_js_clear_job_retry_submit() + * first to silence the ASSERT. + */ +static inline void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js) +{ + KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS); + KBASE_DEBUG_ASSERT((atom->retry_submit_on_slot == + KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID) + || (atom->retry_submit_on_slot == js)); + + atom->retry_submit_on_slot = js; +} + +/** + * Create an initial 'invalid' atom retained state, that requires no + * atom-related work to be done on releasing with + * kbasep_js_runpool_release_ctx_and_katom_retained_state() + */ +static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state) +{ + retained_state->event_code = BASE_JD_EVENT_NOT_STARTED; + retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID; + retained_state->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID; +} + +/** + * Copy atom state that can be made available after jd_done_nolock() is called + * on that atom. + */ +static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom) +{ + retained_state->event_code = katom->event_code; + retained_state->core_req = katom->core_req; + retained_state->retry_submit_on_slot = katom->retry_submit_on_slot; + retained_state->sched_priority = katom->sched_priority; + retained_state->device_nr = katom->device_nr; +} + +/** + * @brief Determine whether an atom has finished (given its retained state), + * and so should be given back to userspace/removed from the system. 
+ * + * Reasons for an atom not finishing include: + * - Being soft-stopped (and so, the atom should be resubmitted sometime later) + * + * @param[in] katom_retained_state the retained state of the atom to check + * @return false if the atom has not finished + * @return !=false if the atom has finished + */ +static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state) +{ + return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT); +} + +/** + * @brief Determine whether a struct kbasep_js_atom_retained_state is valid + * + * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the + * code should just ignore it. + * + * @param[in] katom_retained_state the atom's retained state to check + * @return false if the retained state is invalid, and can be ignored + * @return !=false if the retained state is valid + */ +static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state) +{ + return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID); +} + +static inline bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res) +{ + int js = katom_retained_state->retry_submit_on_slot; + + *res = js; + return (bool) (js >= 0); +} + +/** + * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the + * context is guaranteed to be already previously retained. + * + * It is a programming error to supply the \a as_nr of a context that has not + * been previously retained/has a busy refcount of zero. The only exception is + * when there is no ctx in \a as_nr (NULL returned). + * + * The following locking conditions are made on the caller: + * - it must \em not hold the hwaccess_lock, because it will be used internally. + * + * @return a valid struct kbase_context on success, with a refcount that is guaranteed + * to be non-zero and unmodified by this function. + * @return NULL on failure, indicating that no context was found in \a as_nr + */ +static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr) +{ + struct kbase_context *found_kctx; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS); + + found_kctx = kbdev->as_to_kctx[as_nr]; + KBASE_DEBUG_ASSERT(found_kctx == NULL || + atomic_read(&found_kctx->refcount) > 0); + + return found_kctx; +} + +/* + * The following locking conditions are made on the caller: + * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex. 
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex + */ +static inline void kbase_js_runpool_inc_context_count( + struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + lockdep_assert_held(&js_devdata->runpool_mutex); + + /* Track total contexts */ + KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX); + ++(js_devdata->nr_all_contexts_running); + + if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) { + /* Track contexts that can submit jobs */ + KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running < + S8_MAX); + ++(js_devdata->nr_user_contexts_running); + } +} + +/* + * The following locking conditions are made on the caller: + * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex. + * - The caller must hold the kbasep_js_device_data::runpool_mutex + */ +static inline void kbase_js_runpool_dec_context_count( + struct kbase_device *kbdev, + struct kbase_context *kctx) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + lockdep_assert_held(&js_devdata->runpool_mutex); + + /* Track total contexts */ + --(js_devdata->nr_all_contexts_running); + KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0); + + if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) { + /* Track contexts that can submit jobs */ + --(js_devdata->nr_user_contexts_running); + KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0); + } +} + + +/** + * @brief Submit atoms from all available contexts to all job slots. + * + * This will attempt to submit as many jobs as possible. It will exit when + * either all job slots are full, or all contexts have been used. + * + * @param[in] kbdev Device pointer + */ +static inline void kbase_js_sched_all(struct kbase_device *kbdev) +{ + kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1); +} + +extern const int +kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS]; + +extern const base_jd_prio +kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT]; + +/** + * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio) + * to relative ordering + * @atom_prio: Priority ID to translate. + * + * Atom priority values for @ref base_jd_prio cannot be compared directly to + * find out which are higher or lower. + * + * This function will convert base_jd_prio values for successively lower + * priorities into a monotonically increasing sequence. That is, the lower the + * base_jd_prio priority, the higher the value produced by this function. This + * is in accordance with how the rest of the kernel treates priority. + * + * The mapping is 1:1 and the size of the valid input range is the same as the + * size of the valid output range, i.e. + * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS + * + * Note This must be kept in sync with BASE_JD_PRIO_<...> definitions + * + * Return: On success: a value in the inclusive range + * 0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. 
On failure: + * KBASE_JS_ATOM_SCHED_PRIO_INVALID + */ +static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio) +{ + if (atom_prio >= BASE_JD_NR_PRIO_LEVELS) + return KBASE_JS_ATOM_SCHED_PRIO_INVALID; + + return kbasep_js_atom_priority_to_relative[atom_prio]; +} + +static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio) +{ + unsigned int prio_idx; + + KBASE_DEBUG_ASSERT(0 <= sched_prio + && sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT); + + prio_idx = (unsigned int)sched_prio; + + return kbasep_js_relative_priority_to_atom[prio_idx]; +} + + /** @} *//* end group kbase_js */ + /** @} *//* end group base_kbase_api */ + /** @} *//* end group base_api */ + +#endif /* _KBASE_JS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c new file mode 100644 index 00000000000000..321506ada83597 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c @@ -0,0 +1,301 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + +#include +#include + +/* + * Private functions follow + */ + +/** + * @brief Check whether a ctx has a certain attribute, and if so, retain that + * attribute on the runpool. + * + * Requires: + * - jsctx mutex + * - runpool_irq spinlock + * - ctx is scheduled on the runpool + * + * @return true indicates a change in ctx attributes state of the runpool. + * In this state, the scheduler might be able to submit more jobs than + * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() + * or similar is called sometime later. + * @return false indicates no change in ctx attributes state of the runpool. + */ +static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + bool runpool_state_changed = false; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) { + KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX); + ++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]); + + if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) { + /* First refcount indicates a state change */ + runpool_state_changed = true; + KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute); + } + } + + return runpool_state_changed; +} + +/** + * @brief Check whether a ctx has a certain attribute, and if so, release that + * attribute on the runpool. 
+ * + * Requires: + * - jsctx mutex + * - runpool_irq spinlock + * - ctx is scheduled on the runpool + * + * @return true indicates a change in ctx attributes state of the runpool. + * In this state, the scheduler might be able to submit more jobs than + * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() + * or similar is called sometime later. + * @return false indicates no change in ctx attributes state of the runpool. + */ +static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) +{ + struct kbasep_js_device_data *js_devdata; + struct kbasep_js_kctx_info *js_kctx_info; + bool runpool_state_changed = false; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); + js_devdata = &kbdev->js_data; + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + lockdep_assert_held(&kbdev->hwaccess_lock); + KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED)); + + if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) { + KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0); + --(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]); + + if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) { + /* Last de-refcount indicates a state change */ + runpool_state_changed = true; + KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute); + } + } + + return runpool_state_changed; +} + +/** + * @brief Retain a certain attribute on a ctx, also retaining it on the runpool + * if the context is scheduled. + * + * Requires: + * - jsctx mutex + * - If the context is scheduled, then runpool_irq spinlock must also be held + * + * @return true indicates a change in ctx attributes state of the runpool. + * This may allow the scheduler to submit more jobs than previously. + * @return false indicates no change in ctx attributes state of the runpool. + */ +static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) +{ + struct kbasep_js_kctx_info *js_kctx_info; + bool runpool_state_changed = false; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&kbdev->hwaccess_lock); + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX); + + ++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]); + + if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) { + /* Only ref-count the attribute on the runpool for the first time this contexts sees this attribute */ + KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute); + runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute); + } + + return runpool_state_changed; +} + +/* + * @brief Release a certain attribute on a ctx, also releasing it from the runpool + * if the context is scheduled. + * + * Requires: + * - jsctx mutex + * - If the context is scheduled, then runpool_irq spinlock must also be held + * + * @return true indicates a change in ctx attributes state of the runpool. + * This may allow the scheduler to submit more jobs than previously. 
+ * @return false indicates no change in ctx attributes state of the runpool. + */ +static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute) +{ + struct kbasep_js_kctx_info *js_kctx_info; + bool runpool_state_changed = false; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT); + js_kctx_info = &kctx->jctx.sched_info; + + lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex); + KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0); + + if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) { + lockdep_assert_held(&kbdev->hwaccess_lock); + /* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */ + runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute); + KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute); + } + + /* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release() needs to check it too */ + --(js_kctx_info->ctx.ctx_attr_ref_count[attribute]); + + return runpool_state_changed; +} + +/* + * More commonly used public functions + */ + +void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx) +{ + bool runpool_state_changed = false; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + KBASE_DEBUG_ASSERT(kctx != NULL); + + if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) { + /* This context never submits, so don't track any scheduling attributes */ + return; + } + + /* Transfer attributes held in the context flags for contexts that have submit enabled */ + + /* ... More attributes can be added here ... 
*/ + + /* The context should not have been scheduled yet, so ASSERT if this caused + * runpool state changes (note that other threads *can't* affect the value + * of runpool_state_changed, due to how it's calculated) */ + KBASE_DEBUG_ASSERT(runpool_state_changed == false); + CSTD_UNUSED(runpool_state_changed); +} + +void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) +{ + bool runpool_state_changed; + int i; + + /* Retain any existing attributes */ + for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) { + if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) { + /* The context is being scheduled in, so update the runpool with the new attributes */ + runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i); + + /* We don't need to know about state changed, because retaining a + * context occurs on scheduling it, and that itself will also try + * to run new atoms */ + CSTD_UNUSED(runpool_state_changed); + } + } +} + +bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx) +{ + bool runpool_state_changed = false; + int i; + + /* Release any existing attributes */ + for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) { + if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) { + /* The context is being scheduled out, so update the runpool on the removed attributes */ + runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i); + } + } + + return runpool_state_changed; +} + +void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + bool runpool_state_changed = false; + base_jd_core_req core_req; + + KBASE_DEBUG_ASSERT(katom); + core_req = katom->core_req; + + if (core_req & BASE_JD_REQ_ONLY_COMPUTE) + runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE); + else + runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE); + + if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) { + /* Atom that can run on slot1 or slot2, and can use all cores */ + runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES); + } + + /* We don't need to know about state changed, because retaining an + * atom occurs on adding it, and that itself will also try to run + * new atoms */ + CSTD_UNUSED(runpool_state_changed); +} + +bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state) +{ + bool runpool_state_changed = false; + base_jd_core_req core_req; + + KBASE_DEBUG_ASSERT(katom_retained_state); + core_req = katom_retained_state->core_req; + + /* No-op for invalid atoms */ + if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false) + return false; + + if (core_req & BASE_JD_REQ_ONLY_COMPUTE) + runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE); + else + runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE); + + if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | 
BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) { + /* Atom that can run on slot1 or slot2, and can use all cores */ + runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES); + } + + return runpool_state_changed; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h new file mode 100644 index 00000000000000..ce9183326a576f --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h @@ -0,0 +1,158 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_js_ctx_attr.h + * Job Scheduler Context Attribute APIs + */ + +#ifndef _KBASE_JS_CTX_ATTR_H_ +#define _KBASE_JS_CTX_ATTR_H_ + +/** + * @addtogroup base_api + * @{ + */ + +/** + * @addtogroup base_kbase_api + * @{ + */ + +/** + * @addtogroup kbase_js + * @{ + */ + +/** + * Set the initial attributes of a context (when context create flags are set) + * + * Requires: + * - Hold the jsctx_mutex + */ +void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * Retain all attributes of a context + * + * This occurs on scheduling in the context on the runpool (but after + * is_scheduled is set) + * + * Requires: + * - jsctx mutex + * - runpool_irq spinlock + * - ctx->is_scheduled is true + */ +void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * Release all attributes of a context + * + * This occurs on scheduling out the context from the runpool (but before + * is_scheduled is cleared) + * + * Requires: + * - jsctx mutex + * - runpool_irq spinlock + * - ctx->is_scheduled is true + * + * @return true indicates a change in ctx attributes state of the runpool. + * In this state, the scheduler might be able to submit more jobs than + * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock() + * or similar is called sometime later. + * @return false indicates no change in ctx attributes state of the runpool. + */ +bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx); + +/** + * Retain all attributes of an atom + * + * This occurs on adding an atom to a context + * + * Requires: + * - jsctx mutex + * - If the context is scheduled, then runpool_irq spinlock must also be held + */ +void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); + +/** + * Release all attributes of an atom, given its retained state. + * + * This occurs after (permanently) removing an atom from a context + * + * Requires: + * - jsctx mutex + * - If the context is scheduled, then runpool_irq spinlock must also be held + * + * This is a no-op when \a katom_retained_state is invalid. + * + * @return true indicates a change in ctx attributes state of the runpool. 
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+
+	return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	/* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+	return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+	return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
+
+ /** @} *//* end group kbase_js */
+ /** @} *//* end group base_kbase_api */
+ /** @} *//* end group base_api */
+
+#endif /* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_defs.h b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
new file mode 100644
index 00000000000000..ba8b6441549bd8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
@@ -0,0 +1,386 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+typedef u32 kbase_context_flags;
+
+struct kbasep_atom_req {
+	base_jd_core_req core_req;
+	kbase_context_flags ctx_req;
+	u32 device_nr;
+};
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running.
+ * Otherwise, it potentially allows an unlimited number of GPU NULL
+ * jobs to be submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean value of the context attributes often affects scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ */
+enum kbasep_js_ctx_attr {
+	/** Attribute indicating a context that contains Compute jobs. That is,
+	 * the context has jobs of type @ref BASE_JD_REQ_ONLY_COMPUTE
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE,
+
+	/** Attribute indicating a context that contains Non-Compute jobs. That is,
+	 * the context has some jobs that are \b not of type @ref
+	 * BASE_JD_REQ_ONLY_COMPUTE.
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+	/** Attribute indicating that a context contains compute-job atoms that
+	 * aren't restricted to a coherent group, and can run on all cores.
+	 *
+	 * Specifically, this is when the atom's \a core_req satisfies:
+	 * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T) // uses slot 1 or slot 2
+	 * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups
+	 *
+	 * Such atoms could be blocked from running if one of the coherent groups
+	 * is being used by another job slot, so tracking this context attribute
+	 * allows us to prevent such situations.
+	 *
+	 * @note This doesn't take into account the 1-coregroup case, where all
+	 * compute atoms would effectively be able to run on 'all cores', but
+	 * contexts will still not always get marked with this attribute. Instead,
+	 * it is the caller's responsibility to take into account the number of
+	 * coregroups when interpreting this attribute.
+	 *
+	 * @note Whilst Tiler atoms are normally combined with
+	 * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+	 * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+	 * enough to handle anyway.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+	/** Must be the last in the enum */
+	KBASEP_JS_CTX_ATTR_COUNT
+};
+
+enum {
+	/** Bit indicating that new atom should be started because this atom completed */
+	KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+	/** Bit indicating that the atom was evicted from the JS_NEXT registers */
+	KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device.
This context is global to the device, and is not tied to any + * particular struct kbase_context running on the device. + * + * nr_contexts_running and as_free are optimized for packing together (by making + * them smaller types than u32). The operations on them should rarely involve + * masking. The use of signed types for arithmetic indicates to the compiler that + * the value will not rollover (which would be undefined behavior), and so under + * the Total License model, it is free to make optimizations based on that (i.e. + * to remove masking). + */ +struct kbasep_js_device_data { + /* Sub-structure to collect together Job Scheduling data used in IRQ + * context. The hwaccess_lock must be held when accessing. */ + struct runpool_irq { + /** Bitvector indicating whether a currently scheduled context is allowed to submit jobs. + * When bit 'N' is set in this, it indicates whether the context bound to address space + * 'N' is allowed to submit jobs. + */ + u16 submit_allowed; + + /** Context Attributes: + * Each is large enough to hold a refcount of the number of contexts + * that can fit into the runpool. This is currently BASE_MAX_NR_AS + * + * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store + * the refcount. Hence, it's not worthwhile reducing this to + * bit-manipulation on u32s to save space (where in contrast, 4 bit + * sub-fields would be easy to do and would save space). + * + * Whilst this must not become negative, the sign bit is used for: + * - error detection in debug builds + * - Optimization: it is undefined for a signed int to overflow, and so + * the compiler can optimize for that never happening (thus, no masking + * is required on updating the variable) */ + s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT]; + + /* + * Affinity management and tracking + */ + /** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates + * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */ + u64 slot_affinities[BASE_JM_MAX_NR_SLOTS]; + /** Refcount for each core owned by each slot. Used to generate the + * slot_affinities array of bitvectors + * + * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS, + * because it is refcounted only when a job is definitely about to be + * submitted to a slot, and is de-refcounted immediately after a job + * finishes */ + s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64]; + } runpool_irq; + + /** + * Run Pool mutex, for managing contexts within the runpool. + * Unless otherwise specified, you must hold this lock whilst accessing any + * members that follow + * + * In addition, this is used to access: + * - the kbasep_js_kctx_info::runpool substructure + */ + struct mutex runpool_mutex; + + /** + * Queue Lock, used to access the Policy's queue of contexts independently + * of the Run Pool. + * + * Of course, you don't need the Run Pool lock to access this. + */ + struct mutex queue_mutex; + + /** + * Scheduling semaphore. This must be held when calling + * kbase_jm_kick() + */ + struct semaphore schedule_sem; + + /** + * List of contexts that can currently be pulled from + */ + struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS]; + /** + * List of contexts that can not currently be pulled from, but have + * jobs currently running. 
+	 */
+	struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS];
+
+	/** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+	s8 nr_user_contexts_running;
+	/** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+	s8 nr_all_contexts_running;
+
+	/** Core Requirements to match up with base_js_atom's core_req member
+	 * @note This is a write-once member, and so no locking is required to read */
+	base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+	u32 scheduling_period_ns; /*< Value for JS_SCHEDULING_PERIOD_NS */
+	u32 soft_stop_ticks; /*< Value for JS_SOFT_STOP_TICKS */
+	u32 soft_stop_ticks_cl; /*< Value for JS_SOFT_STOP_TICKS_CL */
+	u32 hard_stop_ticks_ss; /*< Value for JS_HARD_STOP_TICKS_SS */
+	u32 hard_stop_ticks_cl; /*< Value for JS_HARD_STOP_TICKS_CL */
+	u32 hard_stop_ticks_dumping; /*< Value for JS_HARD_STOP_TICKS_DUMPING */
+	u32 gpu_reset_ticks_ss; /*< Value for JS_RESET_TICKS_SS */
+	u32 gpu_reset_ticks_cl; /*< Value for JS_RESET_TICKS_CL */
+	u32 gpu_reset_ticks_dumping; /*< Value for JS_RESET_TICKS_DUMPING */
+	u32 ctx_timeslice_ns; /**< Value for JS_CTX_TIMESLICE_NS */
+
+	/**< Value for JS_SOFT_JOB_TIMEOUT */
+	atomic_t soft_job_timeout_ms;
+
+	/** List of suspended soft jobs */
+	struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+	/* Support soft-stop on a single context */
+	bool softstop_always;
+#endif /* CONFIG_MALI_DEBUG */
+
+	/** The initialized-flag is placed at the end, to avoid cache-pollution (we should
+	 * only be using this during init/term paths).
+	 * @note This is a write-once member, and so no locking is required to read */
+	int init_status;
+
+	/* Number of contexts that can currently be pulled from */
+	u32 nr_contexts_pullable;
+
+	/* Number of contexts that can either be pulled from or are currently
+	 * running */
+	atomic_t nr_contexts_runnable;
+};
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the struct kbase_context that encapsulates all the
+ * scheduling information.
+ */
+struct kbasep_js_kctx_info {
+
+	/**
+	 * Job Scheduler Context information sub-structure. These members are
+	 * accessed regardless of whether the context is:
+	 * - In the Policy's Run Pool
+	 * - In the Policy's Queue
+	 * - Not queued nor in the Run Pool.
+	 *
+	 * You must obtain the jsctx_mutex before accessing any other members of
+	 * this substructure.
+	 *
+	 * You may not access any of these members from IRQ context.
+	 */
+	struct kbase_jsctx {
+		struct mutex jsctx_mutex; /**< Job Scheduler Context lock */
+
+		/** Number of jobs ready to run - does \em not include the jobs waiting in
+		 * the dispatcher, and dependency-only jobs. See kbase_jd_context::job_nr
+		 * for such jobs*/
+		u32 nr_jobs;
+
+		/** Context Attributes:
+		 * Each is large enough to hold a refcount of the number of atoms on
+		 * the context. **/
+		u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+		/**
+		 * Wait queue to wait for KCTX_SCHEDULED flag state changes.
+		 * */
+		wait_queue_head_t is_scheduled_wait;
+
+		/** Link implementing JS queues. Context can be present on one
+		 * list per job slot
+		 */
+		struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS];
+	} ctx;
+
+	/* The initialized-flag is placed at the end, to avoid cache-pollution (we should
+	 * only be using this during init/term paths) */
+	int init_status;
+};
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom.
A copy must be taken via kbasep_js_atom_retained_state_copy(), + * because the original atom could disappear. */ +struct kbasep_js_atom_retained_state { + /** Event code - to determine whether the atom has finished */ + enum base_jd_event_code event_code; + /** core requirements */ + base_jd_core_req core_req; + /* priority */ + int sched_priority; + /** Job Slot to retry submitting to if submission from IRQ handler failed */ + int retry_submit_on_slot; + /* Core group atom was executed on */ + u32 device_nr; + +}; + +/** + * Value signifying 'no retry on a slot required' for: + * - kbase_js_atom_retained_state::retry_submit_on_slot + * - kbase_jd_atom::retry_submit_on_slot + */ +#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1) + +/** + * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state. + * + * @see kbase_atom_retained_state_is_valid() + */ +#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP + +/** + * @brief The JS timer resolution, in microseconds + * + * Any non-zero difference in time will be at least this size. + */ +#define KBASEP_JS_TICK_RESOLUTION_US 1 + +/* + * Internal atom priority defines for kbase_jd_atom::sched_prio + */ +enum { + KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0, + KBASE_JS_ATOM_SCHED_PRIO_MED, + KBASE_JS_ATOM_SCHED_PRIO_LOW, + KBASE_JS_ATOM_SCHED_PRIO_COUNT, +}; + +/* Invalid priority for kbase_jd_atom::sched_prio */ +#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1 + +/* Default priority in the case of contexts with no atoms, or being lenient + * about invalid priorities from userspace */ +#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED + + /** @} *//* end group kbase_js */ + /** @} *//* end group base_kbase_api */ + /** @} *//* end group base_api */ + +#endif /* _KBASE_JS_DEFS_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_linux.h b/drivers/gpu/arm/midgard/mali_kbase_linux.h new file mode 100644 index 00000000000000..6d1e61fd41e038 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_linux.h @@ -0,0 +1,43 @@ +/* + * + * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_linux.h + * Base kernel APIs, Linux implementation. + */ + +#ifndef _KBASE_LINUX_H_ +#define _KBASE_LINUX_H_ + +/* All things that are needed for the Linux port. */ +#include +#include +#include +#include +#include + +#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API)) + #define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func) +#else + #define KBASE_EXPORT_TEST_API(func) +#endif + +#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func) + +#endif /* _KBASE_LINUX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c new file mode 100644 index 00000000000000..ea02c60397d5a3 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c @@ -0,0 +1,2832 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_mem.c + * Base kernel memory APIs + */ +#ifdef CONFIG_DMA_SHARED_BUFFER +#include +#endif /* CONFIG_DMA_SHARED_BUFFER */ +#ifdef CONFIG_UMP +#include +#endif /* CONFIG_UMP */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* This function finds out which RB tree the given GPU VA region belongs to + * based on the region zone */ +static struct rb_root *kbase_reg_flags_to_rbtree(struct kbase_context *kctx, + struct kbase_va_region *reg) +{ + struct rb_root *rbtree = NULL; + + switch (reg->flags & KBASE_REG_ZONE_MASK) { + case KBASE_REG_ZONE_CUSTOM_VA: + rbtree = &kctx->reg_rbtree_custom; + break; + case KBASE_REG_ZONE_EXEC: + rbtree = &kctx->reg_rbtree_exec; + break; + case KBASE_REG_ZONE_SAME_VA: + rbtree = &kctx->reg_rbtree_same; + /* fall through */ + default: + rbtree = &kctx->reg_rbtree_same; + break; + } + + return rbtree; +} + +/* This function finds out which RB tree the given pfn from the GPU VA belongs + * to based on the memory zone the pfn refers to */ +static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx, + u64 gpu_pfn) +{ + struct rb_root *rbtree = NULL; + +#ifdef CONFIG_64BIT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) { +#endif /* CONFIG_64BIT */ + if (gpu_pfn >= KBASE_REG_ZONE_CUSTOM_VA_BASE) + rbtree = &kctx->reg_rbtree_custom; + else if (gpu_pfn >= KBASE_REG_ZONE_EXEC_BASE) + rbtree = &kctx->reg_rbtree_exec; + else + rbtree = &kctx->reg_rbtree_same; +#ifdef CONFIG_64BIT + } else { + if (gpu_pfn >= kctx->same_va_end) + rbtree = &kctx->reg_rbtree_custom; + else + rbtree = &kctx->reg_rbtree_same; + } +#endif /* CONFIG_64BIT */ + + return rbtree; +} + +/* This function inserts a region into the tree. */ +static void kbase_region_tracker_insert(struct kbase_context *kctx, + struct kbase_va_region *new_reg) +{ + u64 start_pfn = new_reg->start_pfn; + struct rb_node **link = NULL; + struct rb_node *parent = NULL; + struct rb_root *rbtree = NULL; + + rbtree = kbase_reg_flags_to_rbtree(kctx, new_reg); + + link = &(rbtree->rb_node); + /* Find the right place in the tree using tree search */ + while (*link) { + struct kbase_va_region *old_reg; + + parent = *link; + old_reg = rb_entry(parent, struct kbase_va_region, rblink); + + /* RBTree requires no duplicate entries. */ + KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn); + + if (old_reg->start_pfn > start_pfn) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + /* Put the new node there, and rebalance tree */ + rb_link_node(&(new_reg->rblink), parent, link); + + rb_insert_color(&(new_reg->rblink), rbtree); +} + +/* Find allocated region enclosing free range. 
*/ +static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free( + struct kbase_context *kctx, u64 start_pfn, size_t nr_pages) +{ + struct rb_node *rbnode = NULL; + struct kbase_va_region *reg = NULL; + struct rb_root *rbtree = NULL; + + u64 end_pfn = start_pfn + nr_pages; + + rbtree = kbase_gpu_va_to_rbtree(kctx, start_pfn); + + rbnode = rbtree->rb_node; + + while (rbnode) { + u64 tmp_start_pfn, tmp_end_pfn; + + reg = rb_entry(rbnode, struct kbase_va_region, rblink); + tmp_start_pfn = reg->start_pfn; + tmp_end_pfn = reg->start_pfn + reg->nr_pages; + + /* If start is lower than this, go left. */ + if (start_pfn < tmp_start_pfn) + rbnode = rbnode->rb_left; + /* If end is higher than this, then go right. */ + else if (end_pfn > tmp_end_pfn) + rbnode = rbnode->rb_right; + else /* Enclosing */ + return reg; + } + + return NULL; +} + +/* Find region enclosing given address. */ +struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr) +{ + struct rb_node *rbnode; + struct kbase_va_region *reg; + u64 gpu_pfn = gpu_addr >> PAGE_SHIFT; + struct rb_root *rbtree = NULL; + + KBASE_DEBUG_ASSERT(NULL != kctx); + + lockdep_assert_held(&kctx->reg_lock); + + rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn); + + rbnode = rbtree->rb_node; + + while (rbnode) { + u64 tmp_start_pfn, tmp_end_pfn; + + reg = rb_entry(rbnode, struct kbase_va_region, rblink); + tmp_start_pfn = reg->start_pfn; + tmp_end_pfn = reg->start_pfn + reg->nr_pages; + + /* If start is lower than this, go left. */ + if (gpu_pfn < tmp_start_pfn) + rbnode = rbnode->rb_left; + /* If end is higher than this, then go right. */ + else if (gpu_pfn >= tmp_end_pfn) + rbnode = rbnode->rb_right; + else /* Enclosing */ + return reg; + } + + return NULL; +} + +KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address); + +/* Find region with given base address */ +struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr) +{ + u64 gpu_pfn = gpu_addr >> PAGE_SHIFT; + struct rb_node *rbnode = NULL; + struct kbase_va_region *reg = NULL; + struct rb_root *rbtree = NULL; + + KBASE_DEBUG_ASSERT(NULL != kctx); + + lockdep_assert_held(&kctx->reg_lock); + + rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn); + + rbnode = rbtree->rb_node; + + while (rbnode) { + reg = rb_entry(rbnode, struct kbase_va_region, rblink); + if (reg->start_pfn > gpu_pfn) + rbnode = rbnode->rb_left; + else if (reg->start_pfn < gpu_pfn) + rbnode = rbnode->rb_right; + else + return reg; + + } + + return NULL; +} + +KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address); + +/* Find region meeting given requirements */ +static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align) +{ + struct rb_node *rbnode = NULL; + struct kbase_va_region *reg = NULL; + struct rb_root *rbtree = NULL; + + /* Note that this search is a linear search, as we do not have a target + address in mind, so does not benefit from the rbtree search */ + + rbtree = kbase_reg_flags_to_rbtree(kctx, reg_reqs); + + rbnode = rb_first(rbtree); + + while (rbnode) { + reg = rb_entry(rbnode, struct kbase_va_region, rblink); + if ((reg->nr_pages >= nr_pages) && + (reg->flags & KBASE_REG_FREE)) { + /* Check alignment */ + u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1); + + if ((start_pfn >= reg->start_pfn) && + (start_pfn <= (reg->start_pfn + reg->nr_pages 
- 1)) && + ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) + return reg; + } + rbnode = rb_next(rbnode); + } + + return NULL; +} + +/** + * @brief Remove a region object from the global list. + * + * The region reg is removed, possibly by merging with other free and + * compatible adjacent regions. It must be called with the context + * region lock held. The associated memory is not released (see + * kbase_free_alloced_region). Internal use only. + */ +static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg) +{ + struct rb_node *rbprev; + struct kbase_va_region *prev = NULL; + struct rb_node *rbnext; + struct kbase_va_region *next = NULL; + struct rb_root *reg_rbtree = NULL; + + int merged_front = 0; + int merged_back = 0; + int err = 0; + + reg_rbtree = kbase_reg_flags_to_rbtree(kctx, reg); + + /* Try to merge with the previous block first */ + rbprev = rb_prev(&(reg->rblink)); + if (rbprev) { + prev = rb_entry(rbprev, struct kbase_va_region, rblink); + if (prev->flags & KBASE_REG_FREE) { + /* We're compatible with the previous VMA, + * merge with it */ + WARN_ON((prev->flags & KBASE_REG_ZONE_MASK) != + (reg->flags & KBASE_REG_ZONE_MASK)); + prev->nr_pages += reg->nr_pages; + rb_erase(&(reg->rblink), reg_rbtree); + reg = prev; + merged_front = 1; + } + } + + /* Try to merge with the next block second */ + /* Note we do the lookup here as the tree may have been rebalanced. */ + rbnext = rb_next(&(reg->rblink)); + if (rbnext) { + /* We're compatible with the next VMA, merge with it */ + next = rb_entry(rbnext, struct kbase_va_region, rblink); + if (next->flags & KBASE_REG_FREE) { + WARN_ON((next->flags & KBASE_REG_ZONE_MASK) != + (reg->flags & KBASE_REG_ZONE_MASK)); + next->start_pfn = reg->start_pfn; + next->nr_pages += reg->nr_pages; + rb_erase(&(reg->rblink), reg_rbtree); + merged_back = 1; + if (merged_front) { + /* We already merged with prev, free it */ + kbase_free_alloced_region(reg); + } + } + } + + /* If we failed to merge then we need to add a new block */ + if (!(merged_front || merged_back)) { + /* + * We didn't merge anything. Add a new free + * placeholder and remove the original one. + */ + struct kbase_va_region *free_reg; + + free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK); + if (!free_reg) { + err = -ENOMEM; + goto out; + } + rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree); + } + + out: + return err; +} + +KBASE_EXPORT_TEST_API(kbase_remove_va_region); + +/** + * @brief Insert a VA region to the list, replacing the current at_reg. + */ +static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages) +{ + struct rb_root *reg_rbtree = NULL; + int err = 0; + + reg_rbtree = kbase_reg_flags_to_rbtree(kctx, at_reg); + + /* Must be a free region */ + KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0); + /* start_pfn should be contained within at_reg */ + KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages)); + /* at least nr_pages from start_pfn should be contained within at_reg */ + KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages); + + new_reg->start_pfn = start_pfn; + new_reg->nr_pages = nr_pages; + + /* Regions are a whole use, so swap and delete old one. 
*/ + if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) { + rb_replace_node(&(at_reg->rblink), &(new_reg->rblink), + reg_rbtree); + kbase_free_alloced_region(at_reg); + } + /* New region replaces the start of the old one, so insert before. */ + else if (at_reg->start_pfn == start_pfn) { + at_reg->start_pfn += nr_pages; + KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages); + at_reg->nr_pages -= nr_pages; + + kbase_region_tracker_insert(kctx, new_reg); + } + /* New region replaces the end of the old one, so insert after. */ + else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) { + at_reg->nr_pages -= nr_pages; + + kbase_region_tracker_insert(kctx, new_reg); + } + /* New region splits the old one, so insert and create new */ + else { + struct kbase_va_region *new_front_reg; + + new_front_reg = kbase_alloc_free_region(kctx, + at_reg->start_pfn, + start_pfn - at_reg->start_pfn, + at_reg->flags & KBASE_REG_ZONE_MASK); + + if (new_front_reg) { + at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages; + at_reg->start_pfn = start_pfn + nr_pages; + + kbase_region_tracker_insert(kctx, new_front_reg); + kbase_region_tracker_insert(kctx, new_reg); + } else { + err = -ENOMEM; + } + } + + return err; +} + +/** + * @brief Add a VA region to the list. + */ +int kbase_add_va_region(struct kbase_context *kctx, + struct kbase_va_region *reg, u64 addr, + size_t nr_pages, size_t align) +{ + struct kbase_va_region *tmp; + u64 gpu_pfn = addr >> PAGE_SHIFT; + int err = 0; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != reg); + + lockdep_assert_held(&kctx->reg_lock); + + if (!align) + align = 1; + + /* must be a power of 2 */ + KBASE_DEBUG_ASSERT((align & (align - 1)) == 0); + KBASE_DEBUG_ASSERT(nr_pages > 0); + + /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */ + if (gpu_pfn) { + struct device *dev = kctx->kbdev->dev; + + KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1))); + + tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages); + if (!tmp) { + dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages); + err = -ENOMEM; + goto exit; + } + if (!(tmp->flags & KBASE_REG_FREE)) { + dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK); + dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages); + dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align); + err = -ENOMEM; + goto exit; + } + + err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages); + if (err) { + dev_warn(dev, "Failed to insert va region"); + err = -ENOMEM; + goto exit; + } + + goto exit; + } + + /* Path 2: Map any free address which meets the requirements. */ + { + u64 start_pfn; + + /* + * Depending on the zone the allocation request is for + * we might need to retry it. + */ + do { + tmp = kbase_region_tracker_find_region_meeting_reqs( + kctx, reg, nr_pages, align); + if (tmp) { + start_pfn = (tmp->start_pfn + align - 1) & + ~(align - 1); + err = kbase_insert_va_region_nolock(kctx, reg, + tmp, start_pfn, nr_pages); + break; + } + + /* + * If the allocation is not from the same zone as JIT + * then don't retry, we're out of VA and there is + * nothing which can be done about it. 
+ */ + if ((reg->flags & KBASE_REG_ZONE_MASK) != + KBASE_REG_ZONE_CUSTOM_VA) + break; + } while (kbase_jit_evict(kctx)); + + if (!tmp) + err = -ENOMEM; + } + + exit: + return err; +} + +KBASE_EXPORT_TEST_API(kbase_add_va_region); + +/** + * @brief Initialize the internal region tracker data structure. + */ +static void kbase_region_tracker_ds_init(struct kbase_context *kctx, + struct kbase_va_region *same_va_reg, + struct kbase_va_region *exec_reg, + struct kbase_va_region *custom_va_reg) +{ + kctx->reg_rbtree_same = RB_ROOT; + kbase_region_tracker_insert(kctx, same_va_reg); + + /* Although exec and custom_va_reg don't always exist, + * initialize unconditionally because of the mem_view debugfs + * implementation which relies on these being empty */ + kctx->reg_rbtree_exec = RB_ROOT; + kctx->reg_rbtree_custom = RB_ROOT; + + if (exec_reg) + kbase_region_tracker_insert(kctx, exec_reg); + if (custom_va_reg) + kbase_region_tracker_insert(kctx, custom_va_reg); +} + +static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree) +{ + struct rb_node *rbnode; + struct kbase_va_region *reg; + + do { + rbnode = rb_first(rbtree); + if (rbnode) { + rb_erase(rbnode, rbtree); + reg = rb_entry(rbnode, struct kbase_va_region, rblink); + kbase_free_alloced_region(reg); + } + } while (rbnode); +} + +void kbase_region_tracker_term(struct kbase_context *kctx) +{ + kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same); + kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_exec); + kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom); +} + +/** + * Initialize the region tracker data structure. + */ +int kbase_region_tracker_init(struct kbase_context *kctx) +{ + struct kbase_va_region *same_va_reg; + struct kbase_va_region *exec_reg = NULL; + struct kbase_va_region *custom_va_reg = NULL; + size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE; + u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE; + u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT; + u64 same_va_pages; + int err; + + /* Take the lock as kbase_free_alloced_region requires it */ + kbase_gpu_vm_lock(kctx); + +#if defined(CONFIG_ARM64) + same_va_bits = VA_BITS; +#elif defined(CONFIG_X86_64) + same_va_bits = 47; +#elif defined(CONFIG_64BIT) +#error Unsupported 64-bit architecture +#endif + +#ifdef CONFIG_64BIT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + same_va_bits = 32; + else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) + same_va_bits = 33; +#endif + + if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) { + err = -EINVAL; + goto fail_unlock; + } + + same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1; + /* all have SAME_VA */ + same_va_reg = kbase_alloc_free_region(kctx, 1, + same_va_pages, + KBASE_REG_ZONE_SAME_VA); + + if (!same_va_reg) { + err = -ENOMEM; + goto fail_unlock; + } + +#ifdef CONFIG_64BIT + /* 32-bit clients have exec and custom VA zones */ + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) { +#endif + if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) { + err = -EINVAL; + goto fail_free_same_va; + } + /* If the current size of TMEM is out of range of the + * virtual address space addressable by the MMU then + * we should shrink it to fit + */ + if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit) + custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE; + + exec_reg = kbase_alloc_free_region(kctx, + KBASE_REG_ZONE_EXEC_BASE, + KBASE_REG_ZONE_EXEC_SIZE, + KBASE_REG_ZONE_EXEC); + + if (!exec_reg) { + err = -ENOMEM; + goto 
fail_free_same_va; + } + + custom_va_reg = kbase_alloc_free_region(kctx, + KBASE_REG_ZONE_CUSTOM_VA_BASE, + custom_va_size, KBASE_REG_ZONE_CUSTOM_VA); + + if (!custom_va_reg) { + err = -ENOMEM; + goto fail_free_exec; + } +#ifdef CONFIG_64BIT + } +#endif + + kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg); + + kctx->same_va_end = same_va_pages + 1; + + kbase_gpu_vm_unlock(kctx); + return 0; + +fail_free_exec: + kbase_free_alloced_region(exec_reg); +fail_free_same_va: + kbase_free_alloced_region(same_va_reg); +fail_unlock: + kbase_gpu_vm_unlock(kctx); + return err; +} + +int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages) +{ +#ifdef CONFIG_64BIT + struct kbase_va_region *same_va; + struct kbase_va_region *custom_va_reg; + u64 same_va_bits; + u64 total_va_size; + int err; + + /* + * Nothing to do for 32-bit clients, JIT uses the existing + * custom VA zone. + */ + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + return 0; + +#if defined(CONFIG_ARM64) + same_va_bits = VA_BITS; +#elif defined(CONFIG_X86_64) + same_va_bits = 47; +#elif defined(CONFIG_64BIT) +#error Unsupported 64-bit architecture +#endif + + if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) + same_va_bits = 33; + + total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1; + + kbase_gpu_vm_lock(kctx); + + /* + * Modify the same VA free region after creation. Be careful to ensure + * that allocations haven't been made as they could cause an overlap + * to happen with existing same VA allocations and the custom VA zone. + */ + same_va = kbase_region_tracker_find_region_base_address(kctx, + PAGE_SIZE); + if (!same_va) { + err = -ENOMEM; + goto fail_unlock; + } + + /* The region flag or region size has changed since creation so bail. */ + if ((!(same_va->flags & KBASE_REG_FREE)) || + (same_va->nr_pages != total_va_size)) { + err = -ENOMEM; + goto fail_unlock; + } + + if (same_va->nr_pages < jit_va_pages || + kctx->same_va_end < jit_va_pages) { + err = -ENOMEM; + goto fail_unlock; + } + + /* It's safe to adjust the same VA zone now */ + same_va->nr_pages -= jit_va_pages; + kctx->same_va_end -= jit_va_pages; + + /* + * Create a custom VA zone at the end of the VA for allocations which + * JIT can use so it doesn't have to allocate VA from the kernel. + */ + custom_va_reg = kbase_alloc_free_region(kctx, + kctx->same_va_end, + jit_va_pages, + KBASE_REG_ZONE_CUSTOM_VA); + + if (!custom_va_reg) { + /* + * The context will be destroyed if we fail here so no point + * reverting the change we made to same_va. 
+ */ + err = -ENOMEM; + goto fail_unlock; + } + + kbase_region_tracker_insert(kctx, custom_va_reg); + + kbase_gpu_vm_unlock(kctx); + return 0; + +fail_unlock: + kbase_gpu_vm_unlock(kctx); + return err; +#else + return 0; +#endif +} + +int kbase_mem_init(struct kbase_device *kbdev) +{ + struct kbasep_mem_device *memdev; + int ret; + + KBASE_DEBUG_ASSERT(kbdev); + + memdev = &kbdev->memdev; + kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX; + + /* Initialize memory usage */ + atomic_set(&memdev->used_pages, 0); + + ret = kbase_mem_pool_init(&kbdev->mem_pool, + KBASE_MEM_POOL_MAX_SIZE_KBDEV, + KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER, + kbdev, + NULL); + if (ret) + return ret; + + ret = kbase_mem_pool_init(&kbdev->lp_mem_pool, + (KBASE_MEM_POOL_MAX_SIZE_KBDEV >> 9), + KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER, + kbdev, + NULL); + if (ret) + kbase_mem_pool_term(&kbdev->mem_pool); + + return ret; +} + +void kbase_mem_halt(struct kbase_device *kbdev) +{ + CSTD_UNUSED(kbdev); +} + +void kbase_mem_term(struct kbase_device *kbdev) +{ + struct kbasep_mem_device *memdev; + int pages; + + KBASE_DEBUG_ASSERT(kbdev); + + memdev = &kbdev->memdev; + + pages = atomic_read(&memdev->used_pages); + if (pages != 0) + dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages); + + kbase_mem_pool_term(&kbdev->mem_pool); + kbase_mem_pool_term(&kbdev->lp_mem_pool); +} + +KBASE_EXPORT_TEST_API(kbase_mem_term); + + + + +/** + * @brief Allocate a free region object. + * + * The allocated object is not part of any list yet, and is flagged as + * KBASE_REG_FREE. No mapping is allocated yet. + * + * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC + * + */ +struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone) +{ + struct kbase_va_region *new_reg; + + KBASE_DEBUG_ASSERT(kctx != NULL); + + /* zone argument should only contain zone related region flags */ + KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0); + KBASE_DEBUG_ASSERT(nr_pages > 0); + /* 64-bit address range is the max */ + KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE)); + + new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL); + + if (!new_reg) + return NULL; + + new_reg->cpu_alloc = NULL; /* no alloc bound yet */ + new_reg->gpu_alloc = NULL; /* no alloc bound yet */ + new_reg->kctx = kctx; + new_reg->flags = zone | KBASE_REG_FREE; + + new_reg->flags |= KBASE_REG_GROWABLE; + + new_reg->start_pfn = start_pfn; + new_reg->nr_pages = nr_pages; + + return new_reg; +} + +KBASE_EXPORT_TEST_API(kbase_alloc_free_region); + +/** + * @brief Free a region object. + * + * The described region must be freed of any mapping. + * + * If the region is not flagged as KBASE_REG_FREE, the region's + * alloc object will be released. + * It is a bug if no alloc object exists for non-free regions. + * + */ +void kbase_free_alloced_region(struct kbase_va_region *reg) +{ + if (!(reg->flags & KBASE_REG_FREE)) { + /* + * The physical allocation should have been removed from the + * eviction list before this function is called. However, in the + * case of abnormal process termination or the app leaking the + * memory kbase_mem_free_region is not called so it can still be + * on the list at termination time of the region tracker. + */ + if (!list_empty(®->gpu_alloc->evict_node)) { + /* + * Unlink the physical allocation before unmaking it + * evictable so that the allocation isn't grown back to + * its last backed size as we're going to unmap it + * anyway. 
+ */ + reg->cpu_alloc->reg = NULL; + if (reg->cpu_alloc != reg->gpu_alloc) + reg->gpu_alloc->reg = NULL; + + /* + * If a region has been made evictable then we must + * unmake it before trying to free it. + * If the memory hasn't been reclaimed it will be + * unmapped and freed below, if it has been reclaimed + * then the operations below are no-ops. + */ + if (reg->flags & KBASE_REG_DONT_NEED) { + KBASE_DEBUG_ASSERT(reg->cpu_alloc->type == + KBASE_MEM_TYPE_NATIVE); + kbase_mem_evictable_unmake(reg->gpu_alloc); + } + } + + /* + * Remove the region from the sticky resource metadata + * list should it be there. + */ + kbase_sticky_resource_release(reg->kctx, NULL, + reg->start_pfn << PAGE_SHIFT); + + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); + /* To detect use-after-free in debug builds */ + KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE); + } + kfree(reg); +} + +KBASE_EXPORT_TEST_API(kbase_free_alloced_region); + +int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align) +{ + int err; + size_t i = 0; + unsigned long attr; + unsigned long mask = ~KBASE_REG_MEMATTR_MASK; + + if ((kctx->kbdev->system_coherency == COHERENCY_ACE) && + (reg->flags & KBASE_REG_SHARE_BOTH)) + attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA); + else + attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC); + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != reg); + + err = kbase_add_va_region(kctx, reg, addr, nr_pages, align); + if (err) + return err; + + if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { + u64 stride; + struct kbase_mem_phy_alloc *alloc; + + alloc = reg->gpu_alloc; + stride = alloc->imported.alias.stride; + KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased); + for (i = 0; i < alloc->imported.alias.nents; i++) { + if (alloc->imported.alias.aliased[i].alloc) { + err = kbase_mmu_insert_pages(kctx, + reg->start_pfn + (i * stride), + alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset, + alloc->imported.alias.aliased[i].length, + reg->flags); + if (err) + goto bad_insert; + + kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc); + } else { + err = kbase_mmu_insert_single_page(kctx, + reg->start_pfn + i * stride, + kctx->aliasing_sink_page, + alloc->imported.alias.aliased[i].length, + (reg->flags & mask) | attr); + + if (err) + goto bad_insert; + } + } + } else { + err = kbase_mmu_insert_pages(kctx, reg->start_pfn, + kbase_get_gpu_phy_pages(reg), + kbase_reg_current_backed_size(reg), + reg->flags); + if (err) + goto bad_insert; + kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc); + } + + return err; + +bad_insert: + if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { + u64 stride; + + stride = reg->gpu_alloc->imported.alias.stride; + KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased); + while (i--) + if (reg->gpu_alloc->imported.alias.aliased[i].alloc) { + kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length); + kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); + } + } + + kbase_remove_va_region(kctx, reg); + + return err; +} + +KBASE_EXPORT_TEST_API(kbase_gpu_mmap); + +static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, + struct kbase_mem_phy_alloc *alloc, bool writeable); + +int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg) +{ + int err; + + if (reg->start_pfn == 0) + return 0; + + if 
(reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { + size_t i; + + err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages); + KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased); + for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++) + if (reg->gpu_alloc->imported.alias.aliased[i].alloc) + kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); + } else { + err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg)); + kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc); + } + + if (reg->gpu_alloc && reg->gpu_alloc->type == + KBASE_MEM_TYPE_IMPORTED_USER_BUF) { + struct kbase_alloc_import_user_buf *user_buf = + ®->gpu_alloc->imported.user_buf; + + if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) { + user_buf->current_mapping_usage_count &= + ~PINNED_ON_IMPORT; + + kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc, + (reg->flags & KBASE_REG_GPU_WR)); + } + } + + if (err) + return err; + + err = kbase_remove_va_region(kctx, reg); + return err; +} + +static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping( + struct kbase_context *kctx, + unsigned long uaddr, size_t size, u64 *offset) +{ + struct vm_area_struct *vma; + struct kbase_cpu_mapping *map; + unsigned long vm_pgoff_in_region; + unsigned long vm_off_in_region; + unsigned long map_start; + size_t map_size; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + lockdep_assert_held(¤t->mm->mmap_lock); +#else + lockdep_assert_held(¤t->mm->mmap_sem); +#endif + + if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */ + return NULL; + + vma = find_vma_intersection(current->mm, uaddr, uaddr+size); + + if (!vma || vma->vm_start > uaddr) + return NULL; + if (vma->vm_ops != &kbase_vm_ops) + /* Not ours! */ + return NULL; + + map = vma->vm_private_data; + + if (map->kctx != kctx) + /* Not from this context! 
*/ + return NULL; + + vm_pgoff_in_region = vma->vm_pgoff - map->region->start_pfn; + vm_off_in_region = vm_pgoff_in_region << PAGE_SHIFT; + map_start = vma->vm_start - vm_off_in_region; + map_size = map->region->nr_pages << PAGE_SHIFT; + + if ((uaddr + size) > (map_start + map_size)) + /* Not within the CPU mapping */ + return NULL; + + *offset = (uaddr - vma->vm_start) + vm_off_in_region; + + return map; +} + +int kbasep_find_enclosing_cpu_mapping_offset( + struct kbase_context *kctx, + unsigned long uaddr, size_t size, u64 *offset) +{ + struct kbase_cpu_mapping *map; + + kbase_os_mem_map_lock(kctx); + + map = kbasep_find_enclosing_cpu_mapping(kctx, uaddr, size, offset); + + kbase_os_mem_map_unlock(kctx); + + if (!map) + return -EINVAL; + + return 0; +} + +KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset); + +void kbase_sync_single(struct kbase_context *kctx, + struct tagged_addr t_cpu_pa, struct tagged_addr t_gpu_pa, + off_t offset, size_t size, enum kbase_sync_type sync_fn) +{ + struct page *cpu_page; + phys_addr_t cpu_pa = as_phys_addr_t(t_cpu_pa); + phys_addr_t gpu_pa = as_phys_addr_t(t_gpu_pa); + + cpu_page = pfn_to_page(PFN_DOWN(cpu_pa)); + + if (likely(cpu_pa == gpu_pa)) { + dma_addr_t dma_addr; + + BUG_ON(!cpu_page); + BUG_ON(offset + size > PAGE_SIZE); + + dma_addr = kbase_dma_addr(cpu_page) + offset; + if (sync_fn == KBASE_SYNC_TO_CPU) + dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr, + size, DMA_BIDIRECTIONAL); + else if (sync_fn == KBASE_SYNC_TO_DEVICE) + dma_sync_single_for_device(kctx->kbdev->dev, dma_addr, + size, DMA_BIDIRECTIONAL); + } else { + void *src = NULL; + void *dst = NULL; + struct page *gpu_page; + + if (WARN(!gpu_pa, "No GPU PA found for infinite cache op")) + return; + + gpu_page = pfn_to_page(PFN_DOWN(gpu_pa)); + + if (sync_fn == KBASE_SYNC_TO_DEVICE) { + src = ((unsigned char *)kmap(cpu_page)) + offset; + dst = ((unsigned char *)kmap(gpu_page)) + offset; + } else if (sync_fn == KBASE_SYNC_TO_CPU) { + dma_sync_single_for_cpu(kctx->kbdev->dev, + kbase_dma_addr(gpu_page) + offset, + size, DMA_BIDIRECTIONAL); + src = ((unsigned char *)kmap(gpu_page)) + offset; + dst = ((unsigned char *)kmap(cpu_page)) + offset; + } + memcpy(dst, src, size); + kunmap(gpu_page); + kunmap(cpu_page); + if (sync_fn == KBASE_SYNC_TO_DEVICE) + dma_sync_single_for_device(kctx->kbdev->dev, + kbase_dma_addr(gpu_page) + offset, + size, DMA_BIDIRECTIONAL); + } +} + +static int kbase_do_syncset(struct kbase_context *kctx, + struct basep_syncset *sset, enum kbase_sync_type sync_fn) +{ + int err = 0; + struct kbase_va_region *reg; + struct kbase_cpu_mapping *map; + unsigned long start; + size_t size; + struct tagged_addr *cpu_pa; + struct tagged_addr *gpu_pa; + u64 page_off, page_count; + u64 i; + u64 offset; + + kbase_os_mem_map_lock(kctx); + kbase_gpu_vm_lock(kctx); + + /* find the region where the virtual address is contained */ + reg = kbase_region_tracker_find_region_enclosing_address(kctx, + sset->mem_handle.basep.handle); + if (!reg) { + dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX", + sset->mem_handle.basep.handle); + err = -EINVAL; + goto out_unlock; + } + + if (!(reg->flags & KBASE_REG_CPU_CACHED) || + kbase_mem_is_imported(reg->gpu_alloc->type)) + goto out_unlock; + + start = (uintptr_t)sset->user_addr; + size = (size_t)sset->size; + + map = kbasep_find_enclosing_cpu_mapping(kctx, start, size, &offset); + if (!map) { + dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX", + start, sset->mem_handle.basep.handle); + err = 
-EINVAL; + goto out_unlock; + } + + page_off = offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + cpu_pa = kbase_get_cpu_phy_pages(reg); + gpu_pa = kbase_get_gpu_phy_pages(reg); + + if (page_off > reg->nr_pages || + page_off + page_count > reg->nr_pages) { + /* Sync overflows the region */ + err = -EINVAL; + goto out_unlock; + } + + /* Sync first page */ + if (as_phys_addr_t(cpu_pa[page_off])) { + size_t sz = MIN(((size_t) PAGE_SIZE - offset), size); + + kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off], + offset, sz, sync_fn); + } + + /* Sync middle pages (if any) */ + for (i = 1; page_count > 2 && i < page_count - 1; i++) { + /* we grow upwards, so bail on first non-present page */ + if (!as_phys_addr_t(cpu_pa[page_off + i])) + break; + + kbase_sync_single(kctx, cpu_pa[page_off + i], + gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn); + } + + /* Sync last page (if any) */ + if (page_count > 1 && + as_phys_addr_t(cpu_pa[page_off + page_count - 1])) { + size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1; + + kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1], + gpu_pa[page_off + page_count - 1], 0, sz, + sync_fn); + } + +out_unlock: + kbase_gpu_vm_unlock(kctx); + kbase_os_mem_map_unlock(kctx); + return err; +} + +int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset) +{ + int err = -EINVAL; + + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(sset != NULL); + + if (sset->mem_handle.basep.handle & ~PAGE_MASK) { + dev_warn(kctx->kbdev->dev, + "mem_handle: passed parameter is invalid"); + return -EINVAL; + } + + switch (sset->type) { + case BASE_SYNCSET_OP_MSYNC: + err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_DEVICE); + break; + + case BASE_SYNCSET_OP_CSYNC: + err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_CPU); + break; + + default: + dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type); + break; + } + + return err; +} + +KBASE_EXPORT_TEST_API(kbase_sync_now); + +/* vm lock must be held */ +int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg) +{ + int err; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != reg); + lockdep_assert_held(&kctx->reg_lock); + + /* + * Unlink the physical allocation before unmaking it evictable so + * that the allocation isn't grown back to its last backed size + * as we're going to unmap it anyway. + */ + reg->cpu_alloc->reg = NULL; + if (reg->cpu_alloc != reg->gpu_alloc) + reg->gpu_alloc->reg = NULL; + + /* + * If a region has been made evictable then we must unmake it + * before trying to free it. + * If the memory hasn't been reclaimed it will be unmapped and freed + * below, if it has been reclaimed then the operations below are no-ops. + */ + if (reg->flags & KBASE_REG_DONT_NEED) { + KBASE_DEBUG_ASSERT(reg->cpu_alloc->type == + KBASE_MEM_TYPE_NATIVE); + kbase_mem_evictable_unmake(reg->gpu_alloc); + } + + err = kbase_gpu_munmap(kctx, reg); + if (err) { + dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n"); + goto out; + } + + /* This will also free the physical pages */ + kbase_free_alloced_region(reg); + + out: + return err; +} + +KBASE_EXPORT_TEST_API(kbase_mem_free_region); + +/** + * @brief Free the region from the GPU and unregister it. + * + * This function implements the free operation on a memory segment. + * It will loudly fail if called with outstanding mappings. 
+ */ +int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr) +{ + int err = 0; + struct kbase_va_region *reg; + + KBASE_DEBUG_ASSERT(kctx != NULL); + + if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) { + dev_warn(kctx->kbdev->dev, "kbase_mem_free: gpu_addr parameter is invalid"); + return -EINVAL; + } + + if (0 == gpu_addr) { + dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n"); + return -EINVAL; + } + kbase_gpu_vm_lock(kctx); + + if (gpu_addr >= BASE_MEM_COOKIE_BASE && + gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) { + int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE); + + reg = kctx->pending_regions[cookie]; + if (!reg) { + err = -EINVAL; + goto out_unlock; + } + + /* ask to unlink the cookie as we'll free it */ + + kctx->pending_regions[cookie] = NULL; + kctx->cookies |= (1UL << cookie); + + kbase_free_alloced_region(reg); + } else { + /* A real GPU va */ + /* Validate the region */ + reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); + if (!reg || (reg->flags & KBASE_REG_FREE)) { + dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX", + gpu_addr); + err = -EINVAL; + goto out_unlock; + } + + if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) { + /* SAME_VA must be freed through munmap */ + dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__, + gpu_addr); + err = -EINVAL; + goto out_unlock; + } + err = kbase_mem_free_region(kctx, reg); + } + + out_unlock: + kbase_gpu_vm_unlock(kctx); + return err; +} + +KBASE_EXPORT_TEST_API(kbase_mem_free); + +int kbase_update_region_flags(struct kbase_context *kctx, + struct kbase_va_region *reg, unsigned long flags) +{ + KBASE_DEBUG_ASSERT(NULL != reg); + KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0); + + reg->flags |= kbase_cache_enabled(flags, reg->nr_pages); + /* all memory is now growable */ + reg->flags |= KBASE_REG_GROWABLE; + + if (flags & BASE_MEM_GROW_ON_GPF) + reg->flags |= KBASE_REG_PF_GROW; + + if (flags & BASE_MEM_PROT_CPU_WR) + reg->flags |= KBASE_REG_CPU_WR; + + if (flags & BASE_MEM_PROT_CPU_RD) + reg->flags |= KBASE_REG_CPU_RD; + + if (flags & BASE_MEM_PROT_GPU_WR) + reg->flags |= KBASE_REG_GPU_WR; + + if (flags & BASE_MEM_PROT_GPU_RD) + reg->flags |= KBASE_REG_GPU_RD; + + if (0 == (flags & BASE_MEM_PROT_GPU_EX)) + reg->flags |= KBASE_REG_GPU_NX; + + if (!kbase_device_is_cpu_coherent(kctx->kbdev)) { + if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) + return -EINVAL; + } else if (flags & (BASE_MEM_COHERENT_SYSTEM | + BASE_MEM_COHERENT_SYSTEM_REQUIRED)) { + reg->flags |= KBASE_REG_SHARE_BOTH; + } + + if (!(reg->flags & KBASE_REG_SHARE_BOTH) && + flags & BASE_MEM_COHERENT_LOCAL) { + reg->flags |= KBASE_REG_SHARE_IN; + } + + /* Set up default MEMATTR usage */ + if (kctx->kbdev->system_coherency == COHERENCY_ACE && + (reg->flags & KBASE_REG_SHARE_BOTH)) { + reg->flags |= + KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE); + } else { + reg->flags |= + KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT); + } + + return 0; +} + +int kbase_alloc_phy_pages_helper( + struct kbase_mem_phy_alloc *alloc, + size_t nr_pages_requested) +{ + int new_page_count __maybe_unused; + size_t old_page_count = alloc->nents; + size_t nr_left = nr_pages_requested; + int res; + struct kbase_context *kctx; + struct tagged_addr *tp; + + KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE); + KBASE_DEBUG_ASSERT(alloc->imported.kctx); + + if (alloc->reg) { + 
if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents) + goto invalid_request; + } + + kctx = alloc->imported.kctx; + + if (nr_pages_requested == 0) + goto done; /*nothing to do*/ + + new_page_count = kbase_atomic_add_pages( + nr_pages_requested, &kctx->used_pages); + kbase_atomic_add_pages(nr_pages_requested, + &kctx->kbdev->memdev.used_pages); + + /* Increase mm counters before we allocate pages so that this + * allocation is visible to the OOM killer */ + kbase_process_page_usage_inc(kctx, nr_pages_requested); + + tp = alloc->pages + old_page_count; + +#ifdef CONFIG_MALI_2MB_ALLOC + /* Check if we have enough pages requested so we can allocate a large + * page (512 * 4KB = 2MB ) + */ + if (nr_left >= (SZ_2M / SZ_4K)) { + int nr_lp = nr_left / (SZ_2M / SZ_4K); + + res = kbase_mem_pool_alloc_pages(&kctx->lp_mem_pool, + nr_lp * (SZ_2M / SZ_4K), + tp, + true); + + if (res > 0) { + nr_left -= res; + tp += res; + } + + if (nr_left) { + struct kbase_sub_alloc *sa, *temp_sa; + + mutex_lock(&kctx->mem_partials_lock); + + list_for_each_entry_safe(sa, temp_sa, + &kctx->mem_partials, link) { + int pidx = 0; + + while (nr_left) { + pidx = find_next_zero_bit(sa->sub_pages, + SZ_2M / SZ_4K, + pidx); + bitmap_set(sa->sub_pages, pidx, 1); + *tp++ = as_tagged_tag(page_to_phys(sa->page + + pidx), + FROM_PARTIAL); + nr_left--; + + if (bitmap_full(sa->sub_pages, SZ_2M / SZ_4K)) { + /* unlink from partial list when full */ + list_del_init(&sa->link); + break; + } + } + } + mutex_unlock(&kctx->mem_partials_lock); + } + + /* only if we actually have a chunk left <512. If more it indicates + * that we couldn't allocate a 2MB above, so no point to retry here. + */ + if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) { + /* create a new partial and suballocate the rest from it */ + struct page *np = NULL; + + do { + int err; + + np = kbase_mem_pool_alloc(&kctx->lp_mem_pool); + if (np) + break; + err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1); + if (err) + break; + } while (1); + + if (np) { + int i; + struct kbase_sub_alloc *sa; + struct page *p; + + sa = kmalloc(sizeof(*sa), GFP_KERNEL); + if (!sa) { + kbase_mem_pool_free(&kctx->lp_mem_pool, np, false); + goto no_new_partial; + } + + /* store pointers back to the control struct */ + np->lru.next = (void *)sa; + for (p = np; p < np + SZ_2M / SZ_4K; p++) + p->lru.prev = (void *)np; + INIT_LIST_HEAD(&sa->link); + bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K); + sa->page = np; + + for (i = 0; i < nr_left; i++) + *tp++ = as_tagged_tag(page_to_phys(np + i), FROM_PARTIAL); + + bitmap_set(sa->sub_pages, 0, nr_left); + nr_left = 0; + + /* expose for later use */ + mutex_lock(&kctx->mem_partials_lock); + list_add(&sa->link, &kctx->mem_partials); + mutex_unlock(&kctx->mem_partials_lock); + } + } + } +no_new_partial: +#endif + + if (nr_left) { + res = kbase_mem_pool_alloc_pages(&kctx->mem_pool, + nr_left, + tp, + false); + if (res <= 0) + goto alloc_failed; + } + + /* + * Request a zone cache update, this scans only the new pages an + * appends their information to the zone cache. if the update + * fails then clear the cache so we fall-back to doing things + * page by page. 
+ */ + if (kbase_zone_cache_update(alloc, old_page_count) != 0) + kbase_zone_cache_clear(alloc); + + KBASE_TLSTREAM_AUX_PAGESALLOC( + kctx->id, + (u64)new_page_count); + + alloc->nents += nr_pages_requested; +done: + return 0; + +alloc_failed: + /* rollback needed if got one or more 2MB but failed later */ + if (nr_left != nr_pages_requested) + kbase_mem_pool_free_pages(&kctx->lp_mem_pool, + nr_pages_requested - nr_left, + alloc->pages + old_page_count, + false, + false); + + kbase_process_page_usage_dec(kctx, nr_pages_requested); + kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages); + kbase_atomic_sub_pages(nr_pages_requested, + &kctx->kbdev->memdev.used_pages); + +invalid_request: + return -ENOMEM; +} + +static void free_partial(struct kbase_context *kctx, struct tagged_addr tp) +{ + struct page *p, *head_page; + struct kbase_sub_alloc *sa; + + p = phys_to_page(as_phys_addr_t(tp)); + head_page = (struct page *)p->lru.prev; + sa = (struct kbase_sub_alloc *)head_page->lru.next; + mutex_lock(&kctx->mem_partials_lock); + clear_bit(p - head_page, sa->sub_pages); + if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) { + list_del(&sa->link); + kbase_mem_pool_free(&kctx->lp_mem_pool, head_page, true); + kfree(sa); + } else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) == + SZ_2M / SZ_4K - 1) { + /* expose the partial again */ + list_add(&sa->link, &kctx->mem_partials); + } + mutex_unlock(&kctx->mem_partials_lock); +} + +int kbase_free_phy_pages_helper( + struct kbase_mem_phy_alloc *alloc, + size_t nr_pages_to_free) +{ + struct kbase_context *kctx = alloc->imported.kctx; + bool syncback; + bool reclaimed = (alloc->evicted != 0); + struct tagged_addr *start_free; + int new_page_count __maybe_unused; + size_t freed = 0; + + KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE); + KBASE_DEBUG_ASSERT(alloc->imported.kctx); + KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free); + + /* early out if nothing to do */ + if (0 == nr_pages_to_free) + return 0; + + start_free = alloc->pages + alloc->nents - nr_pages_to_free; + + syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED; + + /* pad start_free to a valid start location */ + while (nr_pages_to_free && is_huge(*start_free) && + !is_huge_head(*start_free)) { + nr_pages_to_free--; + start_free++; + } + + /* + * Clear the zone cache, we don't expect JIT allocations to be + * shrunk in parts so there is no point trying to optimize for that + * by scanning for the changes caused by freeing this memory and + * updating the existing cache entries. + */ + kbase_zone_cache_clear(alloc); + + + while (nr_pages_to_free) { + if (is_huge_head(*start_free)) { + /* This is a 2MB entry, so free all the 512 pages that + * it points to + */ + kbase_mem_pool_free_pages(&kctx->lp_mem_pool, + 512, + start_free, + syncback, + reclaimed); + nr_pages_to_free -= 512; + start_free += 512; + freed += 512; + } else if (is_partial(*start_free)) { + free_partial(kctx, *start_free); + nr_pages_to_free--; + start_free++; + freed++; + } else { + struct tagged_addr *local_end_free; + + local_end_free = start_free; + while (nr_pages_to_free && + !is_huge(*local_end_free) && + !is_partial(*local_end_free)) { + local_end_free++; + nr_pages_to_free--; + } + kbase_mem_pool_free_pages(&kctx->mem_pool, + local_end_free - start_free, + start_free, + syncback, + reclaimed); + freed += local_end_free - start_free; + start_free += local_end_free - start_free; + } + } + + alloc->nents -= freed; + + /* + * If the allocation was not evicted (i.e. 
evicted == 0) then
+ * the page accounting needs to be done.
+ */
+ if (!reclaimed) {
+ kbase_process_page_usage_dec(kctx, freed);
+ new_page_count = kbase_atomic_sub_pages(freed,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(freed,
+ &kctx->kbdev->memdev.used_pages);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ kctx->id,
+ (u64)new_page_count);
+ }
+
+ return 0;
+}
+
+void kbase_mem_kref_free(struct kref *kref)
+{
+ struct kbase_mem_phy_alloc *alloc;
+
+ alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
+
+ switch (alloc->type) {
+ case KBASE_MEM_TYPE_NATIVE: {
+ WARN_ON(!alloc->imported.kctx);
+ /*
+ * The physical allocation must have been removed from the
+ * eviction list before trying to free it.
+ */
+ WARN_ON(!list_empty(&alloc->evict_node));
+ kbase_free_phy_pages_helper(alloc, alloc->nents);
+ break;
+ }
+ case KBASE_MEM_TYPE_ALIAS: {
+ /* just call put on the underlying phy allocs */
+ size_t i;
+ struct kbase_aliased *aliased;
+
+ aliased = alloc->imported.alias.aliased;
+ if (aliased) {
+ for (i = 0; i < alloc->imported.alias.nents; i++)
+ if (aliased[i].alloc)
+ kbase_mem_phy_alloc_put(aliased[i].alloc);
+ vfree(aliased);
+ }
+ break;
+ }
+ case KBASE_MEM_TYPE_RAW:
+ /* raw pages, external cleanup */
+ break;
+ #ifdef CONFIG_UMP
+ case KBASE_MEM_TYPE_IMPORTED_UMP:
+ ump_dd_release(alloc->imported.ump_handle);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case KBASE_MEM_TYPE_IMPORTED_UMM:
+ dma_buf_detach(alloc->imported.umm.dma_buf,
+ alloc->imported.umm.dma_attachment);
+ dma_buf_put(alloc->imported.umm.dma_buf);
+ break;
+#endif
+ case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+ if (alloc->imported.user_buf.mm)
+ mmdrop(alloc->imported.user_buf.mm);
+ kfree(alloc->imported.user_buf.pages);
+ break;
+ case KBASE_MEM_TYPE_TB:{
+ void *tb;
+
+ tb = alloc->imported.kctx->jctx.tb;
+ kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
+ vfree(tb);
+ break;
+ }
+ default:
+ WARN(1, "Unexpected free of type %d\n", alloc->type);
+ break;
+ }
+
+ /* Free based on allocation type */
+ if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+ vfree(alloc);
+ else
+ kfree(alloc);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+ KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(vsize > 0);
+
+ /* validate user provided arguments */
+ if (size > vsize || vsize > reg->nr_pages)
+ goto out_term;
+
+ /* Prevent vsize*sizeof from wrapping around.
+ * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
+ */
+ if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+ goto out_term;
+
+ KBASE_DEBUG_ASSERT(0 != vsize);
+
+ if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
+ goto out_term;
+
+ reg->cpu_alloc->reg = reg;
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
+ goto out_rollback;
+ reg->gpu_alloc->reg = reg;
+ }
+
+ return 0;
+
+out_rollback:
+ kbase_free_phy_pages_helper(reg->cpu_alloc, size);
+out_term:
+ return -1;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages);
+
+bool kbase_check_alloc_flags(unsigned long flags)
+{
+ /* Only known input flags should be set.
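+ * As an illustration, the flag combination used for JIT allocations later in
+ * this file (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ * BASE_MEM_GROW_ON_GPF | BASE_MEM_COHERENT_LOCAL) passes every check below.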
*/ + if (flags & ~BASE_MEM_FLAGS_INPUT_MASK) + return false; + + /* At least one flag should be set */ + if (flags == 0) + return false; + + /* Either the GPU or CPU must be reading from the allocated memory */ + if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0) + return false; + + /* Either the GPU or CPU must be writing to the allocated memory */ + if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0) + return false; + + /* GPU cannot be writing to GPU executable memory and cannot grow the memory on page fault. */ + if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF))) + return false; + + /* GPU should have at least read or write access otherwise there is no + reason for allocating. */ + if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0) + return false; + + /* BASE_MEM_IMPORT_SHARED is only valid for imported memory */ + if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED) + return false; + + return true; +} + +bool kbase_check_import_flags(unsigned long flags) +{ + /* Only known input flags should be set. */ + if (flags & ~BASE_MEM_FLAGS_INPUT_MASK) + return false; + + /* At least one flag should be set */ + if (flags == 0) + return false; + + /* Imported memory cannot be GPU executable */ + if (flags & BASE_MEM_PROT_GPU_EX) + return false; + + /* Imported memory cannot grow on page fault */ + if (flags & BASE_MEM_GROW_ON_GPF) + return false; + + /* GPU should have at least read or write access otherwise there is no + reason for importing. */ + if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0) + return false; + + /* Secure memory cannot be read by the CPU */ + if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD)) + return false; + + return true; +} + +/** + * @brief Acquire the per-context region list lock + */ +void kbase_gpu_vm_lock(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx != NULL); + mutex_lock(&kctx->reg_lock); +} + +KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock); + +/** + * @brief Release the per-context region list lock + */ +void kbase_gpu_vm_unlock(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(kctx != NULL); + mutex_unlock(&kctx->reg_lock); +} + +KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock); + +#ifdef CONFIG_DEBUG_FS +struct kbase_jit_debugfs_data { + int (*func)(struct kbase_jit_debugfs_data *); + struct mutex lock; + struct kbase_context *kctx; + u64 active_value; + u64 pool_value; + u64 destroy_value; + char buffer[50]; +}; + +static int kbase_jit_debugfs_common_open(struct inode *inode, + struct file *file, int (*func)(struct kbase_jit_debugfs_data *)) +{ + struct kbase_jit_debugfs_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->func = func; + mutex_init(&data->lock); + data->kctx = (struct kbase_context *) inode->i_private; + + file->private_data = data; + + return nonseekable_open(inode, file); +} + +static ssize_t kbase_jit_debugfs_common_read(struct file *file, + char __user *buf, size_t len, loff_t *ppos) +{ + struct kbase_jit_debugfs_data *data; + size_t size; + int ret; + + data = (struct kbase_jit_debugfs_data *) file->private_data; + mutex_lock(&data->lock); + + if (*ppos) { + size = strnlen(data->buffer, sizeof(data->buffer)); + } else { + if (!data->func) { + ret = -EACCES; + goto out_unlock; + } + + if (data->func(data)) { + ret = -EACCES; + goto out_unlock; + } + + size = scnprintf(data->buffer, sizeof(data->buffer), + "%llu,%llu,%llu", data->active_value, + data->pool_value, 
data->destroy_value); + } + + ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size); + +out_unlock: + mutex_unlock(&data->lock); + return ret; +} + +static int kbase_jit_debugfs_common_release(struct inode *inode, + struct file *file) +{ + kfree(file->private_data); + return 0; +} + +#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + return kbase_jit_debugfs_common_open(inode, file, __func); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = kbase_jit_debugfs_common_release, \ + .read = kbase_jit_debugfs_common_read, \ + .write = NULL, \ + .llseek = generic_file_llseek, \ +} + +static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data) +{ + struct kbase_context *kctx = data->kctx; + struct list_head *tmp; + + mutex_lock(&kctx->jit_evict_lock); + list_for_each(tmp, &kctx->jit_active_head) { + data->active_value++; + } + + list_for_each(tmp, &kctx->jit_pool_head) { + data->pool_value++; + } + + list_for_each(tmp, &kctx->jit_destroy_head) { + data->destroy_value++; + } + mutex_unlock(&kctx->jit_evict_lock); + + return 0; +} +KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops, + kbase_jit_debugfs_count_get); + +static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data) +{ + struct kbase_context *kctx = data->kctx; + struct kbase_va_region *reg; + + mutex_lock(&kctx->jit_evict_lock); + list_for_each_entry(reg, &kctx->jit_active_head, jit_node) { + data->active_value += reg->nr_pages; + } + + list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) { + data->pool_value += reg->nr_pages; + } + + list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) { + data->destroy_value += reg->nr_pages; + } + mutex_unlock(&kctx->jit_evict_lock); + + return 0; +} +KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops, + kbase_jit_debugfs_vm_get); + +static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data) +{ + struct kbase_context *kctx = data->kctx; + struct kbase_va_region *reg; + + mutex_lock(&kctx->jit_evict_lock); + list_for_each_entry(reg, &kctx->jit_active_head, jit_node) { + data->active_value += reg->gpu_alloc->nents; + } + + list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) { + data->pool_value += reg->gpu_alloc->nents; + } + + list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) { + data->destroy_value += reg->gpu_alloc->nents; + } + mutex_unlock(&kctx->jit_evict_lock); + + return 0; +} +KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops, + kbase_jit_debugfs_phys_get); + +void kbase_jit_debugfs_init(struct kbase_context *kctx) +{ + /* Debugfs entry for getting the number of JIT allocations. */ + debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry, + kctx, &kbase_jit_debugfs_count_fops); + + /* + * Debugfs entry for getting the total number of virtual pages + * used by JIT allocations. + */ + debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry, + kctx, &kbase_jit_debugfs_vm_fops); + + /* + * Debugfs entry for getting the number of physical pages used + * by JIT allocations. + */ + debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry, + kctx, &kbase_jit_debugfs_phys_fops); +} +#endif /* CONFIG_DEBUG_FS */ + +/** + * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations + * @work: Work item + * + * This function does the work of freeing JIT allocations whose physical + * backing has been released. 
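+ * Regions are queued on @jit_destroy_head by kbase_jit_backing_lost(), which
+ * then kicks this worker via schedule_work().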
+ */
+static void kbase_jit_destroy_worker(struct work_struct *work)
+{
+ struct kbase_context *kctx;
+ struct kbase_va_region *reg;
+
+ kctx = container_of(work, struct kbase_context, jit_work);
+ do {
+ mutex_lock(&kctx->jit_evict_lock);
+ if (list_empty(&kctx->jit_destroy_head)) {
+ mutex_unlock(&kctx->jit_evict_lock);
+ break;
+ }
+
+ reg = list_first_entry(&kctx->jit_destroy_head,
+ struct kbase_va_region, jit_node);
+
+ list_del(&reg->jit_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_free_region(kctx, reg);
+ kbase_gpu_vm_unlock(kctx);
+ } while (1);
+}
+
+int kbase_jit_init(struct kbase_context *kctx)
+{
+ INIT_LIST_HEAD(&kctx->jit_active_head);
+ INIT_LIST_HEAD(&kctx->jit_pool_head);
+ INIT_LIST_HEAD(&kctx->jit_destroy_head);
+ INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
+
+ INIT_LIST_HEAD(&kctx->jit_pending_alloc);
+ INIT_LIST_HEAD(&kctx->jit_atoms_head);
+
+ return 0;
+}
+
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info)
+{
+ struct kbase_va_region *reg = NULL;
+ struct kbase_va_region *walker;
+ struct kbase_va_region *temp;
+ size_t current_diff = SIZE_MAX;
+
+ int ret;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ /*
+ * Scan the pool for an existing allocation which meets our
+ * requirements and remove it.
+ */
+ list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head, jit_node) {
+
+ if (walker->nr_pages >= info->va_pages) {
+ size_t min_size, max_size, diff;
+
+ /*
+ * The JIT allocation's VA requirements have been
+ * met; it's suitable, but another allocation
+ * might be a better fit.
+ */
+ min_size = min_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ max_size = max_t(size_t, walker->gpu_alloc->nents,
+ info->commit_pages);
+ diff = max_size - min_size;
+
+ if (current_diff > diff) {
+ current_diff = diff;
+ reg = walker;
+ }
+
+ /* The allocation is an exact match, stop looking */
+ if (current_diff == 0)
+ break;
+ }
+ }
+
+ if (reg) {
+ /*
+ * Remove the found region from the pool and add it to the
+ * active list.
+ */
+ list_move(&reg->jit_node, &kctx->jit_active_head);
+
+ /*
+ * Remove the allocation from the eviction list as it's no
+ * longer eligible for eviction. This must be done before
+ * dropping the jit_evict_lock.
+ */
+ list_del_init(&reg->gpu_alloc->evict_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Make the physical backing no longer reclaimable */
+ if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+ goto update_failed;
+
+ /* Grow the backing if required */
+ if (reg->gpu_alloc->nents < info->commit_pages) {
+ size_t delta;
+ size_t old_size = reg->gpu_alloc->nents;
+
+ /* Allocate some more pages */
+ delta = info->commit_pages - reg->gpu_alloc->nents;
+ if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, delta)
+ != 0)
+ goto update_failed;
+
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ if (kbase_alloc_phy_pages_helper(
+ reg->cpu_alloc, delta) != 0) {
+ kbase_free_phy_pages_helper(
+ reg->gpu_alloc, delta);
+ goto update_failed;
+ }
+ }
+
+ ret = kbase_mem_grow_gpu_mapping(kctx, reg,
+ info->commit_pages, old_size);
+ /*
+ * The grow failed so put the allocation back in the
+ * pool and return failure.
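+ * (The update_failed path below moves the region back to @jit_pool_head.)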
+ */
+ if (ret)
+ goto update_failed;
+ }
+ kbase_gpu_vm_unlock(kctx);
+ } else {
+ /* No suitable JIT allocation was found so create a new one */
+ u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+ BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+ BASE_MEM_COHERENT_LOCAL;
+ u64 gpu_addr;
+
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
+ info->extent, &flags, &gpu_addr);
+ if (!reg)
+ goto out_unlocked;
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_add(&reg->jit_node, &kctx->jit_active_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+ }
+
+ return reg;
+
+update_failed:
+ /*
+ * An update to an allocation from the pool failed; chances
+ * are slim that a new allocation would fare any better, so
+ * return the allocation to the pool and report failure.
+ */
+ kbase_gpu_vm_unlock(kctx);
+ mutex_lock(&kctx->jit_evict_lock);
+ list_move(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+out_unlocked:
+ return NULL;
+}
+
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ /* The physical backing of memory in the pool is always reclaimable */
+ kbase_gpu_vm_lock(kctx);
+ kbase_mem_evictable_make(reg->gpu_alloc);
+ kbase_gpu_vm_unlock(kctx);
+
+ mutex_lock(&kctx->jit_evict_lock);
+ list_move(&reg->jit_node, &kctx->jit_pool_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+}
+
+void kbase_jit_backing_lost(struct kbase_va_region *reg)
+{
+ struct kbase_context *kctx = reg->kctx;
+
+ lockdep_assert_held(&kctx->jit_evict_lock);
+
+ /*
+ * JIT allocations will always be on a list; if the region
+ * is not on a list then it's not a JIT allocation.
+ */
+ if (list_empty(&reg->jit_node))
+ return;
+
+ /*
+ * Freeing the allocation requires locks we might not be able
+ * to take now, so move the allocation to the free list and kick
+ * the worker which will do the freeing.
+ */
+ list_move(&reg->jit_node, &kctx->jit_destroy_head);
+
+ schedule_work(&kctx->jit_work);
+}
+
+bool kbase_jit_evict(struct kbase_context *kctx)
+{
+ struct kbase_va_region *reg = NULL;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ /* Free the oldest allocation from the pool */
+ mutex_lock(&kctx->jit_evict_lock);
+ if (!list_empty(&kctx->jit_pool_head)) {
+ reg = list_entry(kctx->jit_pool_head.prev,
+ struct kbase_va_region, jit_node);
+ list_del(&reg->jit_node);
+ }
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ if (reg)
+ kbase_mem_free_region(kctx, reg);
+
+ return (reg != NULL);
+}
+
+void kbase_jit_term(struct kbase_context *kctx)
+{
+ struct kbase_va_region *walker;
+
+ /* Free all allocations for this context */
+
+ /*
+ * Flush the freeing of allocations whose backing has been freed
+ * (i.e. everything in jit_destroy_head).
+ */ + cancel_work_sync(&kctx->jit_work); + + kbase_gpu_vm_lock(kctx); + mutex_lock(&kctx->jit_evict_lock); + /* Free all allocations from the pool */ + while (!list_empty(&kctx->jit_pool_head)) { + walker = list_first_entry(&kctx->jit_pool_head, + struct kbase_va_region, jit_node); + list_del(&walker->jit_node); + mutex_unlock(&kctx->jit_evict_lock); + kbase_mem_free_region(kctx, walker); + mutex_lock(&kctx->jit_evict_lock); + } + + /* Free all allocations from active list */ + while (!list_empty(&kctx->jit_active_head)) { + walker = list_first_entry(&kctx->jit_active_head, + struct kbase_va_region, jit_node); + list_del(&walker->jit_node); + mutex_unlock(&kctx->jit_evict_lock); + kbase_mem_free_region(kctx, walker); + mutex_lock(&kctx->jit_evict_lock); + } + mutex_unlock(&kctx->jit_evict_lock); + kbase_gpu_vm_unlock(kctx); +} + +static int kbase_jd_user_buf_map(struct kbase_context *kctx, + struct kbase_va_region *reg) +{ + long pinned_pages; + struct kbase_mem_phy_alloc *alloc; + struct page **pages; + struct tagged_addr *pa; + long i; + int err = -ENOMEM; + unsigned long address; + struct mm_struct *mm; + struct device *dev; + unsigned long offset; + unsigned long local_size; + + alloc = reg->gpu_alloc; + pa = kbase_get_gpu_phy_pages(reg); + address = alloc->imported.user_buf.address; + mm = alloc->imported.user_buf.mm; + + KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF); + + pages = alloc->imported.user_buf.pages; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + pinned_pages = get_user_pages(NULL, mm, + address, + alloc->imported.user_buf.nr_pages, + reg->flags & KBASE_REG_GPU_WR, + 0, pages, NULL); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + pinned_pages = get_user_pages_remote(NULL, mm, + address, + alloc->imported.user_buf.nr_pages, + reg->flags & KBASE_REG_GPU_WR, + 0, pages, NULL); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) + pinned_pages = get_user_pages_remote(NULL, mm, + address, + alloc->imported.user_buf.nr_pages, + reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0, + pages, NULL); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + pinned_pages = get_user_pages_remote(mm, + address, + alloc->imported.user_buf.nr_pages, + reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0, + pages, NULL, NULL); +#else + pinned_pages = get_user_pages_remote(NULL, mm, + address, + alloc->imported.user_buf.nr_pages, + reg->flags & KBASE_REG_GPU_WR ? 
FOLL_WRITE : 0, + pages, NULL, NULL); +#endif + + if (pinned_pages <= 0) + return pinned_pages; + + if (pinned_pages != alloc->imported.user_buf.nr_pages) { + for (i = 0; i < pinned_pages; i++) + put_page(pages[i]); + return -ENOMEM; + } + + dev = kctx->kbdev->dev; + offset = address & ~PAGE_MASK; + local_size = alloc->imported.user_buf.size; + + for (i = 0; i < pinned_pages; i++) { + dma_addr_t dma_addr; + unsigned long min; + + min = MIN(PAGE_SIZE - offset, local_size); + dma_addr = dma_map_page(dev, pages[i], + offset, min, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_addr)) + goto unwind; + + alloc->imported.user_buf.dma_addrs[i] = dma_addr; + pa[i] = as_tagged(page_to_phys(pages[i])); + + local_size -= min; + offset = 0; + } + + alloc->nents = pinned_pages; + + err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa, + kbase_reg_current_backed_size(reg), + reg->flags); + if (err == 0) + return 0; + + alloc->nents = 0; + /* fall down */ +unwind: + while (i--) { + dma_unmap_page(kctx->kbdev->dev, + alloc->imported.user_buf.dma_addrs[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + } + + while (++i < pinned_pages) { + put_page(pages[i]); + pages[i] = NULL; + } + + return err; +} + +static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, + struct kbase_mem_phy_alloc *alloc, bool writeable) +{ + long i; + struct page **pages; + unsigned long size = alloc->imported.user_buf.size; + + KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF); + pages = alloc->imported.user_buf.pages; + for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) { + unsigned long local_size; + dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i]; + + local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK)); + dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size, + DMA_BIDIRECTIONAL); + if (writeable) + set_page_dirty_lock(pages[i]); + put_page(pages[i]); + pages[i] = NULL; + + size -= local_size; + } + alloc->nents = 0; +} + +#ifdef CONFIG_DMA_SHARED_BUFFER +static int kbase_jd_umm_map(struct kbase_context *kctx, + struct kbase_va_region *reg) +{ + struct sg_table *sgt; + struct scatterlist *s; + int i; + struct tagged_addr *pa; + int err; + size_t count = 0; + struct kbase_mem_phy_alloc *alloc; + + alloc = reg->gpu_alloc; + + KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM); + KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt); + sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment, + DMA_BIDIRECTIONAL); + + if (IS_ERR_OR_NULL(sgt)) + return -EINVAL; + + /* save for later */ + alloc->imported.umm.sgt = sgt; + + pa = kbase_get_gpu_phy_pages(reg); + KBASE_DEBUG_ASSERT(pa); + + for_each_sg(sgt->sgl, s, sgt->nents, i) { + size_t j, pages = PFN_UP(sg_dma_len(s)); + + WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1), + "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n", + sg_dma_len(s)); + + WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1), + "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n", + (unsigned long long) sg_dma_address(s)); + + for (j = 0; (j < pages) && (count < reg->nr_pages); j++, + count++) + *pa++ = as_tagged(sg_dma_address(s) + + (j << PAGE_SHIFT)); + WARN_ONCE(j < pages, + "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n", + alloc->imported.umm.dma_buf->size); + } + + if (!(reg->flags & KBASE_REG_IMPORT_PAD) && + WARN_ONCE(count < reg->nr_pages, + "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n", + alloc->imported.umm.dma_buf->size)) { + err = -EINVAL; + goto err_unmap_attachment; + } + + /* Update nents as we now have pages to map */ 
+ alloc->nents = reg->nr_pages; + + err = kbase_mmu_insert_pages(kctx, reg->start_pfn, + kbase_get_gpu_phy_pages(reg), + count, + reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD); + if (err) + goto err_unmap_attachment; + + if (reg->flags & KBASE_REG_IMPORT_PAD) { + err = kbase_mmu_insert_single_page(kctx, + reg->start_pfn + count, + kctx->aliasing_sink_page, + reg->nr_pages - count, + (reg->flags | KBASE_REG_GPU_RD) & + ~KBASE_REG_GPU_WR); + if (err) + goto err_teardown_orig_pages; + } + + return 0; + +err_teardown_orig_pages: + kbase_mmu_teardown_pages(kctx, reg->start_pfn, count); +err_unmap_attachment: + dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment, + alloc->imported.umm.sgt, DMA_BIDIRECTIONAL); + alloc->imported.umm.sgt = NULL; + + return err; +} + +static void kbase_jd_umm_unmap(struct kbase_context *kctx, + struct kbase_mem_phy_alloc *alloc) +{ + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(alloc); + KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment); + KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt); + dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment, + alloc->imported.umm.sgt, DMA_BIDIRECTIONAL); + alloc->imported.umm.sgt = NULL; + alloc->nents = 0; +} +#endif /* CONFIG_DMA_SHARED_BUFFER */ + +struct kbase_mem_phy_alloc *kbase_map_external_resource( + struct kbase_context *kctx, struct kbase_va_region *reg, + struct mm_struct *locked_mm) +{ + int err; + + /* decide what needs to happen for this resource */ + switch (reg->gpu_alloc->type) { + case KBASE_MEM_TYPE_IMPORTED_USER_BUF: { + if (reg->gpu_alloc->imported.user_buf.mm != locked_mm) + goto exit; + + reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++; + if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) { + err = kbase_jd_user_buf_map(kctx, reg); + if (err) { + reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--; + goto exit; + } + } + } + break; + case KBASE_MEM_TYPE_IMPORTED_UMP: { + break; + } +#ifdef CONFIG_DMA_SHARED_BUFFER + case KBASE_MEM_TYPE_IMPORTED_UMM: { + reg->gpu_alloc->imported.umm.current_mapping_usage_count++; + if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) { + err = kbase_jd_umm_map(kctx, reg); + if (err) { + reg->gpu_alloc->imported.umm.current_mapping_usage_count--; + goto exit; + } + } + break; + } +#endif + default: + goto exit; + } + + return kbase_mem_phy_alloc_get(reg->gpu_alloc); +exit: + return NULL; +} + +void kbase_unmap_external_resource(struct kbase_context *kctx, + struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc) +{ + switch (alloc->type) { +#ifdef CONFIG_DMA_SHARED_BUFFER + case KBASE_MEM_TYPE_IMPORTED_UMM: { + alloc->imported.umm.current_mapping_usage_count--; + + if (0 == alloc->imported.umm.current_mapping_usage_count) { + if (reg && reg->gpu_alloc == alloc) { + int err; + + err = kbase_mmu_teardown_pages( + kctx, + reg->start_pfn, + alloc->nents); + WARN_ON(err); + } + + kbase_jd_umm_unmap(kctx, alloc); + } + } + break; +#endif /* CONFIG_DMA_SHARED_BUFFER */ + case KBASE_MEM_TYPE_IMPORTED_USER_BUF: { + alloc->imported.user_buf.current_mapping_usage_count--; + + if (0 == alloc->imported.user_buf.current_mapping_usage_count) { + bool writeable = true; + + if (reg && reg->gpu_alloc == alloc) + kbase_mmu_teardown_pages( + kctx, + reg->start_pfn, + kbase_reg_current_backed_size(reg)); + + if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0)) + writeable = false; + + kbase_jd_user_buf_unmap(kctx, alloc, writeable); + } + } + break; + default: + break; + } + kbase_mem_phy_alloc_put(alloc); 
+} + +struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( + struct kbase_context *kctx, u64 gpu_addr) +{ + struct kbase_ctx_ext_res_meta *meta = NULL; + struct kbase_ctx_ext_res_meta *walker; + + lockdep_assert_held(&kctx->reg_lock); + + /* + * Walk the per context external resource metadata list for the + * metadata which matches the region which is being acquired. + */ + list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) { + if (walker->gpu_addr == gpu_addr) { + meta = walker; + break; + } + } + + /* No metadata exists so create one. */ + if (!meta) { + struct kbase_va_region *reg; + + /* Find the region */ + reg = kbase_region_tracker_find_region_enclosing_address( + kctx, gpu_addr); + if (NULL == reg || (reg->flags & KBASE_REG_FREE)) + goto failed; + + /* Allocate the metadata object */ + meta = kzalloc(sizeof(*meta), GFP_KERNEL); + if (!meta) + goto failed; + + /* + * Fill in the metadata object and acquire a reference + * for the physical resource. + */ + meta->alloc = kbase_map_external_resource(kctx, reg, NULL); + + if (!meta->alloc) + goto fail_map; + + meta->gpu_addr = reg->start_pfn << PAGE_SHIFT; + + list_add(&meta->ext_res_node, &kctx->ext_res_meta_head); + } + + return meta; + +fail_map: + kfree(meta); +failed: + return NULL; +} + +bool kbase_sticky_resource_release(struct kbase_context *kctx, + struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr) +{ + struct kbase_ctx_ext_res_meta *walker; + struct kbase_va_region *reg; + + lockdep_assert_held(&kctx->reg_lock); + + /* Search of the metadata if one isn't provided. */ + if (!meta) { + /* + * Walk the per context external resource metadata list for the + * metadata which matches the region which is being released. + */ + list_for_each_entry(walker, &kctx->ext_res_meta_head, + ext_res_node) { + if (walker->gpu_addr == gpu_addr) { + meta = walker; + break; + } + } + } + + /* No metadata so just return. */ + if (!meta) + return false; + + /* Drop the physical memory reference and free the metadata. */ + reg = kbase_region_tracker_find_region_enclosing_address( + kctx, + meta->gpu_addr); + + kbase_unmap_external_resource(kctx, reg, meta->alloc); + list_del(&meta->ext_res_node); + kfree(meta); + + return true; +} + +int kbase_sticky_resource_init(struct kbase_context *kctx) +{ + INIT_LIST_HEAD(&kctx->ext_res_meta_head); + + return 0; +} + +void kbase_sticky_resource_term(struct kbase_context *kctx) +{ + struct kbase_ctx_ext_res_meta *walker; + + lockdep_assert_held(&kctx->reg_lock); + + /* + * Free any sticky resources which haven't been unmapped. + * + * Note: + * We don't care about refcounts at this point as no future + * references to the meta data will be made. + * Region termination would find these if we didn't free them + * here, but it's more efficient if we do the clean up here. + */ + while (!list_empty(&kctx->ext_res_meta_head)) { + walker = list_first_entry(&kctx->ext_res_meta_head, + struct kbase_ctx_ext_res_meta, ext_res_node); + + kbase_sticky_resource_release(kctx, walker, 0); + } +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h new file mode 100644 index 00000000000000..12f771effccd4a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h @@ -0,0 +1,1127 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_mem.h + * Base kernel memory APIs + */ + +#ifndef _KBASE_MEM_H_ +#define _KBASE_MEM_H_ + +#ifndef _KBASE_H_ +#error "Don't include this file directly, use mali_kbase.h instead" +#endif + +#include +#ifdef CONFIG_UMP +#include +#endif /* CONFIG_UMP */ +#include "mali_base_kernel.h" +#include +#include "mali_kbase_pm.h" +#include "mali_kbase_defs.h" +#if defined(CONFIG_MALI_GATOR_SUPPORT) +#include "mali_kbase_gator.h" +#endif +/* Required for kbase_mem_evictable_unmake */ +#include "mali_kbase_mem_linux.h" + +/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */ +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */ + +/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages. +The MMU reads in 8 page table entries from memory at a time, if we have more than one page fault within the same 8 pages and +page tables are updated accordingly, the MMU does not re-read the page table entries from memory for the subsequent page table +updates and generates duplicate page faults as the page table information used by the MMU is not valid. */ +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */ + +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */ + +/* This must always be a power of 2 */ +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2) +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316) +#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630) +/** + * A CPU mapping + */ +struct kbase_cpu_mapping { + struct list_head mappings_list; + struct kbase_mem_phy_alloc *alloc; + struct kbase_context *kctx; + struct kbase_va_region *region; + int count; + int free_on_close; +}; + +enum kbase_memory_type { + KBASE_MEM_TYPE_NATIVE, + KBASE_MEM_TYPE_IMPORTED_UMP, + KBASE_MEM_TYPE_IMPORTED_UMM, + KBASE_MEM_TYPE_IMPORTED_USER_BUF, + KBASE_MEM_TYPE_ALIAS, + KBASE_MEM_TYPE_TB, + KBASE_MEM_TYPE_RAW +}; + +/* internal structure, mirroring base_mem_aliasing_info, + * but with alloc instead of a gpu va (handle) */ +struct kbase_aliased { + struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */ + u64 offset; /* in pages */ + u64 length; /* in pages */ +}; + +/** + * @brief Physical pages tracking object properties + */ +#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0) +#define KBASE_MEM_PHY_ALLOC_LARGE (1ul << 1) + +/* physical pages tracking object. + * Set up to track N pages. + * N not stored here, the creator holds that info. + * This object only tracks how many elements are actually valid (present). + * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not + * shared with another region or client. 
CPU mappings are OK to exist when changing, as
+ * long as the tracked mapping objects are updated as part of the change.
+ */
+struct kbase_mem_phy_alloc {
+ struct kref kref; /* number of users of this alloc */
+ atomic_t gpu_mappings;
+ size_t nents; /* 0..N */
+ struct tagged_addr *pages; /* N elements, only 0..nents are valid */
+
+ /* kbase_cpu_mappings */
+ struct list_head mappings;
+
+ /* Node used to store this allocation on the eviction list */
+ struct list_head evict_node;
+ /* Physical backing size when the pages were evicted */
+ size_t evicted;
+ /*
+ * Back reference to the region structure which created this
+ * allocation, or NULL if it has been freed.
+ */
+ struct kbase_va_region *reg;
+
+ /* type of buffer */
+ enum kbase_memory_type type;
+
+ unsigned long properties;
+
+ struct list_head zone_cache;
+
+ /* member in union valid based on @a type */
+ union {
+#ifdef CONFIG_UMP
+ ump_dd_handle ump_handle;
+#endif /* CONFIG_UMP */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct {
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ unsigned int current_mapping_usage_count;
+ struct sg_table *sgt;
+ } umm;
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+ struct {
+ u64 stride;
+ size_t nents;
+ struct kbase_aliased *aliased;
+ } alias;
+ /* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
+ struct kbase_context *kctx;
+ struct kbase_alloc_import_user_buf {
+ unsigned long address;
+ unsigned long size;
+ unsigned long nr_pages;
+ struct page **pages;
+ /* top bit (1<<31) of current_mapping_usage_count
+ * specifies that this import was pinned on import.
+ * See PINNED_ON_IMPORT
+ */
+ u32 current_mapping_usage_count;
+ struct mm_struct *mm;
+ dma_addr_t *dma_addrs;
+ } user_buf;
+ } imported;
+};
+
+/* The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
+ * used to signify that a buffer was pinned when it was imported. Since the
+ * reference count is limited by the number of atoms that can be submitted at
+ * once there should be no danger of overflowing into this bit.
+ * Stealing the top bit also has the benefit that
+ * current_mapping_usage_count != 0 if and only if the buffer is mapped.
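+ * As an illustration (not a separate API), code holding the structure can test
+ * the pin state with (current_mapping_usage_count & PINNED_ON_IMPORT) and
+ * recover the plain mapping count with
+ * (current_mapping_usage_count & ~PINNED_ON_IMPORT).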
+ */ +#define PINNED_ON_IMPORT (1<<31) + +static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc) +{ + KBASE_DEBUG_ASSERT(alloc); + /* we only track mappings of NATIVE buffers */ + if (alloc->type == KBASE_MEM_TYPE_NATIVE) + atomic_inc(&alloc->gpu_mappings); +} + +static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc) +{ + KBASE_DEBUG_ASSERT(alloc); + /* we only track mappings of NATIVE buffers */ + if (alloc->type == KBASE_MEM_TYPE_NATIVE) + if (0 > atomic_dec_return(&alloc->gpu_mappings)) { + pr_err("Mismatched %s:\n", __func__); + dump_stack(); + } +} + +/** + * kbase_mem_is_imported - Indicate whether a memory type is imported + * + * @type: the memory type + * + * Return: true if the memory type is imported, false otherwise + */ +static inline bool kbase_mem_is_imported(enum kbase_memory_type type) +{ + return (type == KBASE_MEM_TYPE_IMPORTED_UMP) || + (type == KBASE_MEM_TYPE_IMPORTED_UMM) || + (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF); +} + +void kbase_mem_kref_free(struct kref *kref); + +int kbase_mem_init(struct kbase_device *kbdev); +void kbase_mem_halt(struct kbase_device *kbdev); +void kbase_mem_term(struct kbase_device *kbdev); + +static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc) +{ + kref_get(&alloc->kref); + return alloc; +} + +static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc) +{ + kref_put(&alloc->kref, kbase_mem_kref_free); + return NULL; +} + +/** + * A GPU memory region, and attributes for CPU mappings. + */ +struct kbase_va_region { + struct rb_node rblink; + struct list_head link; + + struct kbase_context *kctx; /* Backlink to base context */ + + u64 start_pfn; /* The PFN in GPU space */ + size_t nr_pages; + +/* Free region */ +#define KBASE_REG_FREE (1ul << 0) +/* CPU write access */ +#define KBASE_REG_CPU_WR (1ul << 1) +/* GPU write access */ +#define KBASE_REG_GPU_WR (1ul << 2) +/* No eXecute flag */ +#define KBASE_REG_GPU_NX (1ul << 3) +/* Is CPU cached? */ +#define KBASE_REG_CPU_CACHED (1ul << 4) +/* Is GPU cached? */ +#define KBASE_REG_GPU_CACHED (1ul << 5) + +#define KBASE_REG_GROWABLE (1ul << 6) +/* Can grow on pf? */ +#define KBASE_REG_PF_GROW (1ul << 7) + +/* Bit 8 is unused */ + +/* inner shareable coherency */ +#define KBASE_REG_SHARE_IN (1ul << 9) +/* inner & outer shareable coherency */ +#define KBASE_REG_SHARE_BOTH (1ul << 10) + +/* Space for 4 different zones */ +#define KBASE_REG_ZONE_MASK (3ul << 11) +#define KBASE_REG_ZONE(x) (((x) & 3) << 11) + +/* GPU read access */ +#define KBASE_REG_GPU_RD (1ul<<13) +/* CPU read access */ +#define KBASE_REG_CPU_RD (1ul<<14) + +/* Index of chosen MEMATTR for this region (0..7) */ +#define KBASE_REG_MEMATTR_MASK (7ul << 16) +#define KBASE_REG_MEMATTR_INDEX(x) (((x) & 7) << 16) +#define KBASE_REG_MEMATTR_VALUE(x) (((x) & KBASE_REG_MEMATTR_MASK) >> 16) + +#define KBASE_REG_SECURE (1ul << 19) + +#define KBASE_REG_DONT_NEED (1ul << 20) + +/* Imported buffer is padded? */ +#define KBASE_REG_IMPORT_PAD (1ul << 21) + +/* Bit 22 is reserved. + * + * Do not remove, use the next unreserved bit for new flags */ +#define KBASE_REG_RESERVED_BIT_22 (1ul << 22) + +#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0) + +/* only used with 32-bit clients */ +/* + * On a 32bit platform, custom VA should be wired from (4GB + shader region) + * to the VA limit of the GPU. 
Unfortunately, the Linux mmap() interface + * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference). + * So we put the default limit to the maximum possible on Linux and shrink + * it down, if required by the GPU, during initialization. + */ + +/* + * Dedicated 16MB region for shader code: + * VA range 0x101000000-0x102000000 + */ +#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1) +#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT) +#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT) + +#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2) +#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */ +#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE) +/* end 32-bit clients only */ + + unsigned long flags; + + size_t extent; /* nr of pages alloc'd on PF */ + + struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */ + struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */ + + /* List head used to store the region in the JIT allocation pool */ + struct list_head jit_node; +}; + +/* Common functions */ +static inline struct tagged_addr *kbase_get_cpu_phy_pages( + struct kbase_va_region *reg) +{ + KBASE_DEBUG_ASSERT(reg); + KBASE_DEBUG_ASSERT(reg->cpu_alloc); + KBASE_DEBUG_ASSERT(reg->gpu_alloc); + KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); + + return reg->cpu_alloc->pages; +} + +static inline struct tagged_addr *kbase_get_gpu_phy_pages( + struct kbase_va_region *reg) +{ + KBASE_DEBUG_ASSERT(reg); + KBASE_DEBUG_ASSERT(reg->cpu_alloc); + KBASE_DEBUG_ASSERT(reg->gpu_alloc); + KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); + + return reg->gpu_alloc->pages; +} + +static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg) +{ + KBASE_DEBUG_ASSERT(reg); + /* if no alloc object the backed size naturally is 0 */ + if (!reg->cpu_alloc) + return 0; + + KBASE_DEBUG_ASSERT(reg->cpu_alloc); + KBASE_DEBUG_ASSERT(reg->gpu_alloc); + KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents); + + return reg->cpu_alloc->nents; +} + +#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */ + +static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type) +{ + struct kbase_mem_phy_alloc *alloc; + size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages; + size_t per_page_size = sizeof(*alloc->pages); + + /* Imported pages may have page private data already in use */ + if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) { + alloc_size += nr_pages * + sizeof(*alloc->imported.user_buf.dma_addrs); + per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs); + } + + /* + * Prevent nr_pages*per_page_size + sizeof(*alloc) from + * wrapping around. 
*/
+ if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+ / per_page_size))
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate based on the size to reduce internal fragmentation of vmem */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc = vzalloc(alloc_size);
+ else
+ alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+ if (!alloc)
+ return ERR_PTR(-ENOMEM);
+
+ /* Store allocation method */
+ if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+ alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
+ kref_init(&alloc->kref);
+ atomic_set(&alloc->gpu_mappings, 0);
+ alloc->nents = 0;
+ alloc->pages = (void *)(alloc + 1);
+ INIT_LIST_HEAD(&alloc->mappings);
+ alloc->type = type;
+ INIT_LIST_HEAD(&alloc->zone_cache);
+
+ if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
+ alloc->imported.user_buf.dma_addrs =
+ (void *) (alloc->pages + nr_pages);
+
+ return alloc;
+}
+
+static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
+ struct kbase_context *kctx)
+{
+ KBASE_DEBUG_ASSERT(reg);
+ KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
+ KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
+ KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
+
+ reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ if (IS_ERR(reg->cpu_alloc))
+ return PTR_ERR(reg->cpu_alloc);
+ else if (!reg->cpu_alloc)
+ return -ENOMEM;
+ reg->cpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+ if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
+ && (reg->flags & KBASE_REG_CPU_CACHED)) {
+ reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
+ KBASE_MEM_TYPE_NATIVE);
+ reg->gpu_alloc->imported.kctx = kctx;
+ INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+ } else {
+ reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ }
+
+ INIT_LIST_HEAD(&reg->jit_node);
+ reg->flags &= ~KBASE_REG_FREE;
+ return 0;
+}
+
+static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_add_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
+{
+ int new_val = atomic_sub_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+ kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+ return new_val;
+}
+
+/*
+ * Max size for kbdev memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * Max size for kctx memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * The order required for a 2MB page allocation (2^order * 4KB = 2MB)
+ */
+#define KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER 9
+
+/*
+ * The order required for a 4KB page allocation
+ */
+#define KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER 0
+
+/**
+ * kbase_mem_pool_init - Create a memory pool for a kbase device
+ * @pool: Memory pool to initialize
+ * @max_size: Maximum number of free pages the pool can hold
+ * @order: Page order for physical page size (order=0=>4kB, order=9=>2MB)
+ * @kbdev: Kbase device where memory is used
+ * @next_pool: Pointer to the next pool or NULL.
+ *
+ * Allocations from @pool are in whole pages. Each @pool has a free list where
+ * pages can be quickly allocated from. The free list is initially empty and
+ * filled whenever pages are freed back to the pool. The number of free pages
+ * in the pool will in general not exceed @max_size, but the pool may in
+ * certain corner cases grow above @max_size.
+ *
+ * If @next_pool is not NULL, we will allocate from @next_pool before going to
+ * the kernel allocator. Similarly, pages can spill over to @next_pool when
+ * @pool is full. Pages are zeroed before they spill over to another pool, to
+ * prevent leaking information between applications.
+ *
+ * A shrinker is registered so that Linux mm can reclaim pages from the pool as
+ * needed.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+ size_t max_size,
+ size_t order,
+ struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool);
+
+/**
+ * kbase_mem_pool_term - Destroy a memory pool
+ * @pool: Memory pool to destroy
+ *
+ * Pages in the pool will spill over to @next_pool (if available) or be freed
+ * to the kernel.
+ */
+void kbase_mem_pool_term(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc - Allocate a page from memory pool
+ * @pool: Memory pool to allocate from
+ *
+ * Allocations from the pool are made as follows:
+ * 1. If there are free pages in the pool, allocate a page from @pool.
+ * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
+ * from @next_pool.
+ * 3. Return NULL if no memory in the pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ */
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_free - Free a page to memory pool
+ * @pool: Memory pool where page should be freed
+ * @page: Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * Pages are freed to the pool as follows:
+ * 1. If @pool is not full, add @page to @pool.
+ * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
+ * @next_pool.
+ * 3. Finally, free @page to the kernel.
+ */
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
+ bool dirty);
+
+/**
+ * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
+ * @pool: Memory pool to allocate from
+ * @nr_pages: Number of pages to allocate
+ * @pages: Pointer to array where the physical address of the allocated
+ * pages will be stored.
+ * @partial_allowed: Whether allocating fewer pages than requested is allowed
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
+ *
+ * Return:
+ * On success number of pages allocated (could be less than nr_pages if
+ * partial_allowed).
+ * On error an error code.
+ */
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ struct tagged_addr *pages, bool partial_allowed);
+
+/**
+ * kbase_mem_pool_free_pages - Free pages to memory pool
+ * @pool: Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages: Pointer to array holding the physical addresses of the pages to
+ * free.
+ * @dirty: Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ * the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+ struct tagged_addr *pages, bool dirty, bool reclaimed);
+
+/**
+ * kbase_mem_pool_size - Get number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
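+ * (kbase_mem_pool_grow(), documented below, is one such case; it can push the
+ * pool above @max_size.)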
+ *
+ * Return: Number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
+{
+ return READ_ONCE(pool->cur_size);
+}
+
+/**
+ * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
+ * @pool: Memory pool to inspect
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
+{
+ return pool->max_size;
+}
+
+
+/**
+ * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
+ * @pool: Memory pool to update
+ * @max_size: Maximum number of free pages the pool can hold
+ *
+ * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
+ * For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
+
+/**
+ * kbase_mem_pool_grow - Grow the pool
+ * @pool: Memory pool to grow
+ * @nr_to_grow: Number of pages to add to the pool
+ *
+ * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
+ * become larger than the maximum size specified.
+ *
+ * Returns: 0 on success, -ENOMEM if unable to allocate sufficient pages
+ */
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+
+/**
+ * kbase_mem_pool_trim - Grow or shrink the pool to a new size
+ * @pool: Memory pool to trim
+ * @new_size: New number of pages in the pool
+ *
+ * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
+
+/**
+ * kbase_mem_alloc_page - Allocate a new page for a device
+ * @pool: Memory pool to allocate a page from
+ *
+ * Most uses should use kbase_mem_pool_alloc to allocate a page. However, that
+ * function can fail in the event the pool is empty.
+ *
+ * Return: A new page or NULL if no memory
+ */
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
+
+int kbase_region_tracker_init(struct kbase_context *kctx);
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
+void kbase_region_tracker_term(struct kbase_context *kctx);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * @brief Check that a pointer is actually a valid region.
+ *
+ * Must be called with context lock held.
+ */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
+
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
+void kbase_free_alloced_region(struct kbase_va_region *reg);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+bool kbase_check_alloc_flags(unsigned long flags);
+bool kbase_check_import_flags(unsigned long flags);
+
+/**
+ * kbase_update_region_flags - Convert user space flags to kernel region flags
+ *
+ * @kctx: kbase context
+ * @reg: The region to update the flags on
+ * @flags: The flags passed from user space
+ *
+ * The user space flag BASE_MEM_COHERENT_SYSTEM_REQUIRED will be rejected and
+ * this function will fail if the system does not support system coherency.
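+ * For example, BASE_MEM_PROT_GPU_RD and BASE_MEM_PROT_GPU_WR translate to
+ * KBASE_REG_GPU_RD and KBASE_REG_GPU_WR on the region, and leaving
+ * BASE_MEM_PROT_GPU_EX clear sets KBASE_REG_GPU_NX.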
+ * + * Return: 0 if successful, -EINVAL if the flags are not supported + */ +int kbase_update_region_flags(struct kbase_context *kctx, + struct kbase_va_region *reg, unsigned long flags); + +void kbase_gpu_vm_lock(struct kbase_context *kctx); +void kbase_gpu_vm_unlock(struct kbase_context *kctx); + +int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size); + +int kbase_mmu_init(struct kbase_context *kctx); +void kbase_mmu_term(struct kbase_context *kctx); + +phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx); +void kbase_mmu_free_pgd(struct kbase_context *kctx); +int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags); +int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags); +int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr phys, size_t nr, + unsigned long flags); + +int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr); +int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags); + +/** + * @brief Register region and map it on the GPU. + * + * Call kbase_add_va_region() and map the region on the GPU. + */ +int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align); + +/** + * @brief Remove the region from the GPU and unregister it. + * + * Must be called with context lock held. + */ +int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg); + +/** + * The caller has the following locking conditions: + * - It must hold kbase_device->mmu_hw_mutex + * - It must hold the hwaccess_lock + */ +void kbase_mmu_update(struct kbase_context *kctx); + +/** + * kbase_mmu_disable() - Disable the MMU for a previously active kbase context. + * @kctx: Kbase context + * + * Disable and perform the required cache maintenance to remove the all + * data from provided kbase context from the GPU caches. + * + * The caller has the following locking conditions: + * - It must hold kbase_device->mmu_hw_mutex + * - It must hold the hwaccess_lock + */ +void kbase_mmu_disable(struct kbase_context *kctx); + +/** + * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified + * address space. + * @kbdev: Kbase device + * @as_nr: The address space number to set to unmapped. + * + * This function must only be called during reset/power-up and it used to + * ensure the registers are in a known state. + * + * The caller must hold kbdev->mmu_hw_mutex. + */ +void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr); + +void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat); + +/** Dump the MMU tables to a buffer + * + * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the + * buffer is too small then the return value will be NULL. + * + * The GPU vm lock must be held when calling this function. + * + * The buffer returned should be freed with @ref vfree when it is no longer required. + * + * @param[in] kctx The kbase context to dump + * @param[in] nr_pages The number of pages to allocate for the buffer. 
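+ *
+ * Illustrative usage (the 4-page buffer size is an arbitrary example):
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   buf = kbase_mmu_dump(kctx, 4);
+ *   kbase_gpu_vm_unlock(kctx);
+ *   if (buf) {
+ *           ... consume the dump ...
+ *           vfree(buf);
+ *   }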
+ * + * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too + * small) + */ +void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages); + +/** + * kbase_sync_now - Perform cache maintenance on a memory region + * + * @kctx: The kbase context of the region + * @sset: A syncset structure describing the region and direction of the + * synchronisation required + * + * Return: 0 on success or error code + */ +int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset); +void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr cpu_pa, + struct tagged_addr gpu_pa, off_t offset, size_t size, + enum kbase_sync_type sync_fn); +void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr); +void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr); + +/* OS specific functions */ +int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr); +int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg); +void kbase_os_mem_map_lock(struct kbase_context *kctx); +void kbase_os_mem_map_unlock(struct kbase_context *kctx); + +/** + * @brief Update the memory allocation counters for the current process + * + * OS specific call to updates the current memory allocation counters for the current process with + * the supplied delta. + * + * @param[in] kctx The kbase context + * @param[in] pages The desired delta to apply to the memory usage counters. + */ + +void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages); + +/** + * @brief Add to the memory allocation counters for the current process + * + * OS specific call to add to the current memory allocation counters for the current process by + * the supplied amount. + * + * @param[in] kctx The kernel base context used for the allocation. + * @param[in] pages The desired delta to apply to the memory usage counters. + */ + +static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages) +{ + kbasep_os_process_page_usage_update(kctx, pages); +} + +/** + * @brief Subtract from the memory allocation counters for the current process + * + * OS specific call to subtract from the current memory allocation counters for the current process by + * the supplied amount. + * + * @param[in] kctx The kernel base context used for the allocation. + * @param[in] pages The desired delta to apply to the memory usage counters. + */ + +static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages) +{ + kbasep_os_process_page_usage_update(kctx, 0 - pages); +} + +/** + * kbasep_find_enclosing_cpu_mapping_offset() - Find the offset of the CPU + * mapping of a memory allocation containing a given address range + * + * Searches for a CPU mapping of any part of any region that fully encloses the + * CPU virtual address range specified by @uaddr and @size. Returns a failure + * indication if only part of the address range lies within a CPU mapping. + * + * @kctx: The kernel base context used for the allocation. + * @uaddr: Start of the CPU virtual address range. + * @size: Size of the CPU virtual address range (in bytes). + * @offset: The offset from the start of the allocation to the specified CPU + * virtual address. + * + * Return: 0 if offset was obtained successfully. Error code otherwise. 
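+ *
+ * Illustrative usage (uaddr and size describe a user-supplied CPU virtual
+ * address range; names are arbitrary):
+ *
+ *   u64 offset;
+ *
+ *   if (kbasep_find_enclosing_cpu_mapping_offset(kctx, uaddr, size, &offset))
+ *           return -EINVAL;
+ *   ... offset is now relative to the start of the enclosing allocation ...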
+ */ +int kbasep_find_enclosing_cpu_mapping_offset( + struct kbase_context *kctx, + unsigned long uaddr, size_t size, u64 *offset); + +enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer); +void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); +void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom); + +/** +* @brief Allocates physical pages. +* +* Allocates \a nr_pages_requested and updates the alloc object. +* +* @param[in] alloc allocation object to add pages to +* @param[in] nr_pages_requested number of physical pages to allocate +* +* @return 0 if all pages have been successfully allocated. Error code otherwise +*/ +int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested); + +/** +* @brief Free physical pages. +* +* Frees \a nr_pages and updates the alloc object. +* +* @param[in] alloc allocation object to free pages from +* @param[in] nr_pages_to_free number of physical pages to free +*/ +int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free); + +static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr) +{ + SetPagePrivate(p); + if (sizeof(dma_addr_t) > sizeof(p->private)) { + /* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the + * private field stays the same. So we have to be clever and + * use the fact that we only store DMA addresses of whole pages, + * so the low bits should be zero */ + KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1))); + set_page_private(p, dma_addr >> PAGE_SHIFT); + } else { + set_page_private(p, dma_addr); + } +} + +static inline dma_addr_t kbase_dma_addr(struct page *p) +{ + if (sizeof(dma_addr_t) > sizeof(p->private)) + return ((dma_addr_t)page_private(p)) << PAGE_SHIFT; + + return (dma_addr_t)page_private(p); +} + +static inline void kbase_clear_dma_addr(struct page *p) +{ + ClearPagePrivate(p); +} + +/** +* @brief Process a bus or page fault. +* +* This function will process a fault on a specific address space +* +* @param[in] kbdev The @ref kbase_device the fault happened on +* @param[in] kctx The @ref kbase_context for the faulting address space if +* one was found. +* @param[in] as The address space that has the fault +*/ +void kbase_mmu_interrupt_process(struct kbase_device *kbdev, + struct kbase_context *kctx, struct kbase_as *as); + +/** + * @brief Process a page fault. + * + * @param[in] data work_struct passed by queue_work() + */ +void page_fault_worker(struct work_struct *data); + +/** + * @brief Process a bus fault. + * + * @param[in] data work_struct passed by queue_work() + */ +void bus_fault_worker(struct work_struct *data); + +/** + * @brief Flush MMU workqueues. + * + * This function will cause any outstanding page or bus faults to be processed. + * It should be called prior to powering off the GPU. 
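+ *
+ * Illustrative ordering on a power-down path (the power-off step is
+ * paraphrased; only the flush call is taken from this interface):
+ *
+ *   kbase_flush_mmu_wqs(kbdev);
+ *   ... power off the GPU ...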
+ * + * @param[in] kbdev Device pointer + */ +void kbase_flush_mmu_wqs(struct kbase_device *kbdev); + +/** + * kbase_sync_single_for_device - update physical memory and give GPU ownership + * @kbdev: Device pointer + * @handle: DMA address of region + * @size: Size of region to sync + * @dir: DMA data direction + */ + +void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir); + +/** + * kbase_sync_single_for_cpu - update physical memory and give CPU ownership + * @kbdev: Device pointer + * @handle: DMA address of region + * @size: Size of region to sync + * @dir: DMA data direction + */ + +void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir); + +#ifdef CONFIG_DEBUG_FS +/** + * kbase_jit_debugfs_init - Add per context debugfs entry for JIT. + * @kctx: kbase context + */ +void kbase_jit_debugfs_init(struct kbase_context *kctx); +#endif /* CONFIG_DEBUG_FS */ + +/** + * kbase_jit_init - Initialize the JIT memory pool management + * @kctx: kbase context + * + * Returns zero on success or negative error number on failure. + */ +int kbase_jit_init(struct kbase_context *kctx); + +/** + * kbase_jit_allocate - Allocate JIT memory + * @kctx: kbase context + * @info: JIT allocation information + * + * Return: JIT allocation on success or NULL on failure. + */ +struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx, + struct base_jit_alloc_info *info); + +/** + * kbase_jit_free - Free a JIT allocation + * @kctx: kbase context + * @reg: JIT allocation + * + * Frees a JIT allocation and places it into the free pool for later reuse. + */ +void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg); + +/** + * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing + * @reg: JIT allocation + */ +void kbase_jit_backing_lost(struct kbase_va_region *reg); + +/** + * kbase_jit_evict - Evict a JIT allocation from the pool + * @kctx: kbase context + * + * Evict the least recently used JIT allocation from the pool. This can be + * required if normal VA allocations are failing due to VA exhaustion. + * + * Return: True if a JIT allocation was freed, false otherwise. + */ +bool kbase_jit_evict(struct kbase_context *kctx); + +/** + * kbase_jit_term - Terminate the JIT memory pool management + * @kctx: kbase context + */ +void kbase_jit_term(struct kbase_context *kctx); + +/** + * kbase_map_external_resource - Map an external resource to the GPU. + * @kctx: kbase context. + * @reg: The region to map. + * @locked_mm: The mm_struct which has been locked for this operation. + * + * Return: The physical allocation which backs the region on success or NULL + * on failure. + */ +struct kbase_mem_phy_alloc *kbase_map_external_resource( + struct kbase_context *kctx, struct kbase_va_region *reg, + struct mm_struct *locked_mm); + +/** + * kbase_unmap_external_resource - Unmap an external resource from the GPU. + * @kctx: kbase context. + * @reg: The region to unmap or NULL if it has already been released. + * @alloc: The physical allocation being unmapped. + */ +void kbase_unmap_external_resource(struct kbase_context *kctx, + struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc); + +/** + * kbase_sticky_resource_init - Initialize sticky resource management. + * @kctx: kbase context + * + * Returns zero on success or negative error number on failure. 
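+ *
+ * Typical lifecycle of the sticky-resource interface declared below
+ * (illustrative only; gpu_addr is an arbitrary external resource address):
+ *
+ *   kbase_sticky_resource_init(kctx);
+ *   meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
+ *   ...
+ *   kbase_sticky_resource_release(kctx, meta, 0);
+ *   kbase_sticky_resource_term(kctx);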
+ */ +int kbase_sticky_resource_init(struct kbase_context *kctx); + +/** + * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource. + * @kctx: kbase context. + * @gpu_addr: The GPU address of the external resource. + * + * Return: The metadata object which represents the binding between the + * external resource and the kbase context on success or NULL on failure. + */ +struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( + struct kbase_context *kctx, u64 gpu_addr); + +/** + * kbase_sticky_resource_release - Release a reference on a sticky resource. + * @kctx: kbase context. + * @meta: Binding metadata. + * @gpu_addr: GPU address of the external resource. + * + * If meta is NULL then gpu_addr will be used to scan the metadata list and + * find the matching metadata (if any), otherwise the provided meta will be + * used and gpu_addr will be ignored. + * + * Return: True if the release found the metadata and the reference was dropped. + */ +bool kbase_sticky_resource_release(struct kbase_context *kctx, + struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr); + +/** + * kbase_sticky_resource_term - Terminate sticky resource management. + * @kctx: kbase context + */ +void kbase_sticky_resource_term(struct kbase_context *kctx); + +/** + * kbase_zone_cache_update - Update the memory zone cache after new pages have + * been added. + * @alloc: The physical memory allocation to build the cache for. + * @start_offset: Offset to where the new pages start. + * + * Updates an existing memory zone cache, updating the counters for the + * various zones. + * If the memory allocation doesn't already have a zone cache assume that + * one isn't created and thus don't do anything. + * + * Return: Zero cache was updated, negative error code on error. + */ +int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc, + size_t start_offset); + +/** + * kbase_zone_cache_build - Build the memory zone cache. + * @alloc: The physical memory allocation to build the cache for. + * + * Create a new zone cache for the provided physical memory allocation if + * one doesn't already exist, if one does exist then just return. + * + * Return: Zero if the zone cache was created, negative error code on error. + */ +int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc); + +/** + * kbase_zone_cache_clear - Clear the memory zone cache. + * @alloc: The physical memory allocation to clear the cache on. + */ +void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc); + +#endif /* _KBASE_MEM_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c new file mode 100644 index 00000000000000..fc416e68008cda --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c @@ -0,0 +1,2751 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_mem_linux.c + * Base kernel memory APIs, Linux implementation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#include +#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */ +#ifdef CONFIG_DMA_SHARED_BUFFER +#include +#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ +#include +#include + +#include +#include +#include +#include +#include + +/* + * From 4.20.0 kernel vm_insert_pfn was dropped + * Make wrapper to preserve compatibility + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) +{ + return vm_fault_to_errno(vmf_insert_pfn(vma, addr, pfn), 0xffff); +} +#endif + +static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma); + +/** + * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation + * @kctx: Context the region belongs to + * @reg: The GPU region + * @new_pages: The number of pages after the shrink + * @old_pages: The number of pages before the shrink + * + * Shrink (or completely remove) all CPU mappings which reference the shrunk + * part of the allocation. + * + * Note: Caller must be holding the processes mmap_sem lock. + */ +static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, + struct kbase_va_region *reg, + u64 new_pages, u64 old_pages); + +/** + * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation + * @kctx: Context the region belongs to + * @reg: The GPU region or NULL if there isn't one + * @new_pages: The number of pages after the shrink + * @old_pages: The number of pages before the shrink + * + * Return: 0 on success, negative -errno on error + * + * Unmap the shrunk pages from the GPU mapping. Note that the size of the region + * itself is unmodified as we still need to reserve the VA, only the page tables + * will be modified by this function. 
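+ *
+ * Illustrative call, as used when reducing the committed size of a region
+ * (the physical pages themselves are released separately, e.g. via
+ * kbase_free_phy_pages_helper()):
+ *
+ *   err = kbase_mem_shrink_gpu_mapping(kctx, reg, new_pages, old_pages);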
+ */ +static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, + struct kbase_va_region *reg, + u64 new_pages, u64 old_pages); + +struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, + u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, + u64 *gpu_va) +{ + int zone; + int gpu_pc_bits; + struct kbase_va_region *reg; + struct device *dev; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(flags); + KBASE_DEBUG_ASSERT(gpu_va); + + dev = kctx->kbdev->dev; + *gpu_va = 0; /* return 0 on failure */ + + gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size; + + if (0 == va_pages) { + dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!"); + goto bad_size; + } + + if (va_pages > (U64_MAX / PAGE_SIZE)) + /* 64-bit address range is the max */ + goto bad_size; + + if (!kbase_check_alloc_flags(*flags)) { + dev_warn(dev, + "kbase_mem_alloc called with bad flags (%llx)", + (unsigned long long)*flags); + goto bad_flags; + } + + if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 && + !kbase_device_is_cpu_coherent(kctx->kbdev)) { + dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable"); + goto bad_flags; + } + if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 && + !kbase_device_is_cpu_coherent(kctx->kbdev)) { + /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */ + *flags &= ~BASE_MEM_COHERENT_SYSTEM; + } + + /* Limit GPU executable allocs to GPU PC size */ + if ((*flags & BASE_MEM_PROT_GPU_EX) && + (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT))) + goto bad_ex_size; + + /* find out which VA zone to use */ + if (*flags & BASE_MEM_SAME_VA) + zone = KBASE_REG_ZONE_SAME_VA; + else if (*flags & BASE_MEM_PROT_GPU_EX) + zone = KBASE_REG_ZONE_EXEC; + else + zone = KBASE_REG_ZONE_CUSTOM_VA; + + reg = kbase_alloc_free_region(kctx, 0, va_pages, zone); + if (!reg) { + dev_err(dev, "Failed to allocate free region"); + goto no_region; + } + + if (kbase_update_region_flags(kctx, reg, *flags) != 0) + goto invalid_flags; + + if (kbase_reg_prepare_native(reg, kctx) != 0) { + dev_err(dev, "Failed to prepare region"); + goto prepare_failed; + } + + if (*flags & BASE_MEM_GROW_ON_GPF) { + reg->extent = extent; + if (reg->extent == 0) + goto invalid_extent; + } + else + reg->extent = 0; + + if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) { + dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)", + (unsigned long long)commit_pages, + (unsigned long long)va_pages); + goto no_mem; + } + + kbase_gpu_vm_lock(kctx); + + /* mmap needed to setup VA? */ + if (*flags & BASE_MEM_SAME_VA) { + unsigned long prot = PROT_NONE; + unsigned long va_size = va_pages << PAGE_SHIFT; + unsigned long va_map = va_size; + unsigned long cookie, cookie_nr; + unsigned long cpu_addr; + + /* Bind to a cookie */ + if (!kctx->cookies) { + dev_err(dev, "No cookies available for allocation!"); + kbase_gpu_vm_unlock(kctx); + goto no_cookie; + } + /* return a cookie */ + cookie_nr = __ffs(kctx->cookies); + kctx->cookies &= ~(1UL << cookie_nr); + BUG_ON(kctx->pending_regions[cookie_nr]); + kctx->pending_regions[cookie_nr] = reg; + + kbase_gpu_vm_unlock(kctx); + + /* relocate to correct base */ + cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE); + cookie <<= PAGE_SHIFT; + + /* + * 10.1-10.4 UKU userland relies on the kernel to call mmap. 
+ * For all other versions we can just return the cookie + */ + if (kctx->api_version < KBASE_API_VERSION(10, 1) || + kctx->api_version > KBASE_API_VERSION(10, 4)) { + *gpu_va = (u64) cookie; + return reg; + } + if (*flags & BASE_MEM_PROT_CPU_RD) + prot |= PROT_READ; + if (*flags & BASE_MEM_PROT_CPU_WR) + prot |= PROT_WRITE; + + cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot, + MAP_SHARED, cookie); + + if (IS_ERR_VALUE(cpu_addr)) { + kbase_gpu_vm_lock(kctx); + kctx->pending_regions[cookie_nr] = NULL; + kctx->cookies |= (1UL << cookie_nr); + kbase_gpu_vm_unlock(kctx); + goto no_mmap; + } + + *gpu_va = (u64) cpu_addr; + } else /* we control the VA */ { + if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) { + dev_warn(dev, "Failed to map memory on GPU"); + kbase_gpu_vm_unlock(kctx); + goto no_mmap; + } + /* return real GPU VA */ + *gpu_va = reg->start_pfn << PAGE_SHIFT; + + kbase_gpu_vm_unlock(kctx); + } + + return reg; + +no_mmap: +no_cookie: +no_mem: +invalid_extent: + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); +invalid_flags: +prepare_failed: + kfree(reg); +no_region: +bad_ex_size: +bad_flags: +bad_size: + return NULL; +} +KBASE_EXPORT_TEST_API(kbase_mem_alloc); + +int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out) +{ + struct kbase_va_region *reg; + int ret = -EINVAL; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(out); + + if (gpu_addr & ~PAGE_MASK) { + dev_warn(kctx->kbdev->dev, "mem_query: gpu_addr: passed parameter is invalid"); + return -EINVAL; + } + + kbase_gpu_vm_lock(kctx); + + /* Validate the region */ + reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); + if (!reg || (reg->flags & KBASE_REG_FREE)) + goto out_unlock; + + switch (query) { + case KBASE_MEM_QUERY_COMMIT_SIZE: + if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) { + *out = kbase_reg_current_backed_size(reg); + } else { + size_t i; + struct kbase_aliased *aliased; + *out = 0; + aliased = reg->cpu_alloc->imported.alias.aliased; + for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++) + *out += aliased[i].length; + } + break; + case KBASE_MEM_QUERY_VA_SIZE: + *out = reg->nr_pages; + break; + case KBASE_MEM_QUERY_FLAGS: + { + *out = 0; + if (KBASE_REG_CPU_WR & reg->flags) + *out |= BASE_MEM_PROT_CPU_WR; + if (KBASE_REG_CPU_RD & reg->flags) + *out |= BASE_MEM_PROT_CPU_RD; + if (KBASE_REG_CPU_CACHED & reg->flags) + *out |= BASE_MEM_CACHED_CPU; + if (KBASE_REG_GPU_WR & reg->flags) + *out |= BASE_MEM_PROT_GPU_WR; + if (KBASE_REG_GPU_RD & reg->flags) + *out |= BASE_MEM_PROT_GPU_RD; + if (!(KBASE_REG_GPU_NX & reg->flags)) + *out |= BASE_MEM_PROT_GPU_EX; + if (KBASE_REG_SHARE_BOTH & reg->flags) + *out |= BASE_MEM_COHERENT_SYSTEM; + if (KBASE_REG_SHARE_IN & reg->flags) + *out |= BASE_MEM_COHERENT_LOCAL; + break; + } + default: + *out = 0; + goto out_unlock; + } + + ret = 0; + +out_unlock: + kbase_gpu_vm_unlock(kctx); + return ret; +} + +/** + * kbase_mem_evictable_reclaim_count_objects - Count number of pages in the + * Ephemeral memory eviction list. + * @s: Shrinker + * @sc: Shrinker control + * + * Return: Number of pages which can be freed. 
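+ *
+ * On kernels before 3.12 a single ->shrink() callback emulates the
+ * count/scan split (see kbase_mem_evictable_reclaim_shrink() below); the
+ * dispatch is roughly:
+ *
+ *   sc->nr_to_scan == 0:  kbase_mem_evictable_reclaim_count_objects()
+ *   sc->nr_to_scan  > 0:  kbase_mem_evictable_reclaim_scan_objects()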
+ */ +static +unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s, + struct shrink_control *sc) +{ + struct kbase_context *kctx; + struct kbase_mem_phy_alloc *alloc; + unsigned long pages = 0; + + kctx = container_of(s, struct kbase_context, reclaim); + + mutex_lock(&kctx->jit_evict_lock); + + list_for_each_entry(alloc, &kctx->evict_list, evict_node) + pages += alloc->nents; + + mutex_unlock(&kctx->jit_evict_lock); + return pages; +} + +/** + * kbase_mem_evictable_reclaim_scan_objects - Scan the Ephemeral memory eviction + * list for pages and try to reclaim them. + * @s: Shrinker + * @sc: Shrinker control + * + * Return: Number of pages freed (can be less then requested) or -1 if the + * shrinker failed to free pages in its pool. + * + * Note: + * This function accesses region structures without taking the region lock, + * this is required as the OOM killer can call the shrinker after the region + * lock has already been held. + * This is safe as we can guarantee that a region on the eviction list will + * not be freed (kbase_mem_free_region removes the allocation from the list + * before destroying it), or modified by other parts of the driver. + * The eviction list itself is guarded by the eviction lock and the MMU updates + * are protected by their own lock. + */ +static +unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s, + struct shrink_control *sc) +{ + struct kbase_context *kctx; + struct kbase_mem_phy_alloc *alloc; + struct kbase_mem_phy_alloc *tmp; + unsigned long freed = 0; + + kctx = container_of(s, struct kbase_context, reclaim); + mutex_lock(&kctx->jit_evict_lock); + + list_for_each_entry_safe(alloc, tmp, &kctx->evict_list, evict_node) { + int err; + + err = kbase_mem_shrink_gpu_mapping(kctx, alloc->reg, + 0, alloc->nents); + if (err != 0) { + /* + * Failed to remove GPU mapping, tell the shrinker + * to stop trying to shrink our slab even though we + * have pages in it. + */ + freed = -1; + goto out_unlock; + } + + /* + * Update alloc->evicted before freeing the backing so the + * helper can determine that it needs to bypass the accounting + * and memory pool. + */ + alloc->evicted = alloc->nents; + + kbase_free_phy_pages_helper(alloc, alloc->evicted); + freed += alloc->evicted; + list_del_init(&alloc->evict_node); + + /* + * Inform the JIT allocator this region has lost backing + * as it might need to free the allocation. 
+ */ + kbase_jit_backing_lost(alloc->reg); + + /* Enough pages have been freed so stop now */ + if (freed > sc->nr_to_scan) + break; + } +out_unlock: + mutex_unlock(&kctx->jit_evict_lock); + + return freed; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +static int kbase_mem_evictable_reclaim_shrink(struct shrinker *s, + struct shrink_control *sc) +{ + if (sc->nr_to_scan == 0) + return kbase_mem_evictable_reclaim_count_objects(s, sc); + + return kbase_mem_evictable_reclaim_scan_objects(s, sc); +} +#endif + +int kbase_mem_evictable_init(struct kbase_context *kctx) +{ + INIT_LIST_HEAD(&kctx->evict_list); + mutex_init(&kctx->jit_evict_lock); + + /* Register shrinker */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) + kctx->reclaim.shrink = kbase_mem_evictable_reclaim_shrink; +#else + kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects; + kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects; +#endif + kctx->reclaim.seeks = DEFAULT_SEEKS; + /* Kernel versions prior to 3.1 : + * struct shrinker does not define batch */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) + kctx->reclaim.batch = 0; +#endif + register_shrinker(&kctx->reclaim); + return 0; +} + +void kbase_mem_evictable_deinit(struct kbase_context *kctx) +{ + unregister_shrinker(&kctx->reclaim); +} + +struct kbase_mem_zone_cache_entry { + /* List head used to link the cache entry to the memory allocation. */ + struct list_head zone_node; + /* The zone the cacheline is for. */ + struct zone *zone; + /* The number of pages in the allocation which belong to this zone. */ + u64 count; +}; + +static bool kbase_zone_cache_builder(struct kbase_mem_phy_alloc *alloc, + size_t start_offset) +{ + struct kbase_mem_zone_cache_entry *cache = NULL; + size_t i; + int ret = 0; + + for (i = start_offset; i < alloc->nents; i++) { + struct page *p = phys_to_page(as_phys_addr_t(alloc->pages[i])); + struct zone *zone = page_zone(p); + bool create = true; + + if (cache && (cache->zone == zone)) { + /* + * Fast path check as most of the time adjacent + * pages come from the same zone. + */ + create = false; + } else { + /* + * Slow path check, walk all the cache entries to see + * if we already know about this zone. + */ + list_for_each_entry(cache, &alloc->zone_cache, zone_node) { + if (cache->zone == zone) { + create = false; + break; + } + } + } + + /* This zone wasn't found in the cache, create an entry for it */ + if (create) { + cache = kmalloc(sizeof(*cache), GFP_KERNEL); + if (!cache) { + ret = -ENOMEM; + goto bail; + } + cache->zone = zone; + cache->count = 0; + list_add(&cache->zone_node, &alloc->zone_cache); + } + + cache->count++; + } + return 0; + +bail: + return ret; +} + +int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc, + size_t start_offset) +{ + /* + * Bail if the zone cache is empty, only update the cache if it + * existed in the first place. 
+ */ + if (list_empty(&alloc->zone_cache)) + return 0; + + return kbase_zone_cache_builder(alloc, start_offset); +} + +int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc) +{ + /* Bail if the zone cache already exists */ + if (!list_empty(&alloc->zone_cache)) + return 0; + + return kbase_zone_cache_builder(alloc, 0); +} + +void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc) +{ + struct kbase_mem_zone_cache_entry *walker; + + while(!list_empty(&alloc->zone_cache)){ + walker = list_first_entry(&alloc->zone_cache, + struct kbase_mem_zone_cache_entry, zone_node); + list_del(&walker->zone_node); + kfree(walker); + } +} + +/** + * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable. + * @alloc: The physical allocation + */ +static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc) +{ + struct kbase_context *kctx = alloc->imported.kctx; + struct kbase_mem_zone_cache_entry *zone_cache; + int __maybe_unused new_page_count; + int err; + + /* Attempt to build a zone cache of tracking */ + err = kbase_zone_cache_build(alloc); + if (err == 0) { + /* Bulk update all the zones */ + list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) { + zone_page_state_add(zone_cache->count, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_cache->zone, NR_SLAB_RECLAIMABLE_B); +#else + zone_cache->zone, NR_SLAB_RECLAIMABLE); +#endif + } + } else { + /* Fall-back to page by page updates */ + int i; + + for (i = 0; i < alloc->nents; i++) { + struct page *p; + struct zone *zone; + + p = phys_to_page(as_phys_addr_t(alloc->pages[i])); + zone = page_zone(p); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_page_state_add(1, zone, NR_SLAB_RECLAIMABLE_B); +#else + zone_page_state_add(1, zone, NR_SLAB_RECLAIMABLE); +#endif + } + } + + kbase_process_page_usage_dec(kctx, alloc->nents); + new_page_count = kbase_atomic_sub_pages(alloc->nents, + &kctx->used_pages); + kbase_atomic_sub_pages(alloc->nents, &kctx->kbdev->memdev.used_pages); + + KBASE_TLSTREAM_AUX_PAGESALLOC( + kctx->id, + (u64)new_page_count); +} + +/** + * kbase_mem_evictable_unmark_reclaim - Mark the pages as no longer reclaimable. + * @alloc: The physical allocation + */ +static +void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc) +{ + struct kbase_context *kctx = alloc->imported.kctx; + struct kbase_mem_zone_cache_entry *zone_cache; + int __maybe_unused new_page_count; + int err; + + new_page_count = kbase_atomic_add_pages(alloc->nents, + &kctx->used_pages); + kbase_atomic_add_pages(alloc->nents, &kctx->kbdev->memdev.used_pages); + + /* Increase mm counters so that the allocation is accounted for + * against the process and thus is visible to the OOM killer, + * then remove it from the reclaimable accounting. 
*/ + kbase_process_page_usage_inc(kctx, alloc->nents); + + /* Attempt to build a zone cache of tracking */ + err = kbase_zone_cache_build(alloc); + if (err == 0) { + /* Bulk update all the zones */ + list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) { + zone_page_state_add(-zone_cache->count, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_cache->zone, NR_SLAB_RECLAIMABLE_B); +#else + zone_cache->zone, NR_SLAB_RECLAIMABLE); +#endif + } + } else { + /* Fall-back to page by page updates */ + int i; + + for (i = 0; i < alloc->nents; i++) { + struct page *p; + struct zone *zone; + + p = phys_to_page(as_phys_addr_t(alloc->pages[i])); + zone = page_zone(p); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_page_state_add(-1, zone, NR_SLAB_RECLAIMABLE_B); +#else + zone_page_state_add(-1, zone, NR_SLAB_RECLAIMABLE); +#endif + } + } + + KBASE_TLSTREAM_AUX_PAGESALLOC( + kctx->id, + (u64)new_page_count); +} + +int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc) +{ + struct kbase_context *kctx = gpu_alloc->imported.kctx; + + lockdep_assert_held(&kctx->reg_lock); + + /* This alloction can't already be on a list. */ + WARN_ON(!list_empty(&gpu_alloc->evict_node)); + + kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg, + 0, gpu_alloc->nents); + + /* + * Add the allocation to the eviction list, after this point the shrink + * can reclaim it. + */ + mutex_lock(&kctx->jit_evict_lock); + list_add(&gpu_alloc->evict_node, &kctx->evict_list); + mutex_unlock(&kctx->jit_evict_lock); + kbase_mem_evictable_mark_reclaim(gpu_alloc); + + gpu_alloc->reg->flags |= KBASE_REG_DONT_NEED; + return 0; +} + +bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc) +{ + struct kbase_context *kctx = gpu_alloc->imported.kctx; + int err = 0; + + lockdep_assert_held(&kctx->reg_lock); + + /* + * First remove the allocation from the eviction list as it's no + * longer eligible for eviction. + */ + list_del_init(&gpu_alloc->evict_node); + + if (gpu_alloc->evicted == 0) { + /* + * The backing is still present, update the VM stats as it's + * in use again. + */ + kbase_mem_evictable_unmark_reclaim(gpu_alloc); + } else { + /* If the region is still alive ... */ + if (gpu_alloc->reg) { + /* ... allocate replacement backing ... */ + err = kbase_alloc_phy_pages_helper(gpu_alloc, + gpu_alloc->evicted); + + /* + * ... and grow the mapping back to its + * pre-eviction size. + */ + if (!err) + err = kbase_mem_grow_gpu_mapping(kctx, + gpu_alloc->reg, + gpu_alloc->evicted, 0); + + gpu_alloc->evicted = 0; + } + } + + /* If the region is still alive remove the DONT_NEED attribute. */ + if (gpu_alloc->reg) + gpu_alloc->reg->flags &= ~KBASE_REG_DONT_NEED; + + return (err == 0); +} + +int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask) +{ + struct kbase_va_region *reg; + int ret = -EINVAL; + unsigned int real_flags = 0; + unsigned int prev_flags = 0; + bool prev_needed, new_needed; + + KBASE_DEBUG_ASSERT(kctx); + + if (!gpu_addr) + return -EINVAL; + + if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) + return -EINVAL; + + /* nuke other bits */ + flags &= mask; + + /* check for only supported flags */ + if (flags & ~(BASE_MEM_FLAGS_MODIFIABLE)) + goto out; + + /* mask covers bits we don't support? 
*/ + if (mask & ~(BASE_MEM_FLAGS_MODIFIABLE)) + goto out; + + /* convert flags */ + if (BASE_MEM_COHERENT_SYSTEM & flags) + real_flags |= KBASE_REG_SHARE_BOTH; + else if (BASE_MEM_COHERENT_LOCAL & flags) + real_flags |= KBASE_REG_SHARE_IN; + + /* now we can lock down the context, and find the region */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_write(¤t->mm->mmap_lock); +#else + down_write(¤t->mm->mmap_sem); +#endif + kbase_gpu_vm_lock(kctx); + + /* Validate the region */ + reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr); + if (!reg || (reg->flags & KBASE_REG_FREE)) + goto out_unlock; + + /* Is the region being transitioning between not needed and needed? */ + prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED; + new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED; + if (prev_needed != new_needed) { + /* Aliased allocations can't be made ephemeral */ + if (atomic_read(®->cpu_alloc->gpu_mappings) > 1) + goto out_unlock; + + if (new_needed) { + /* Only native allocations can be marked not needed */ + if (reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE) { + ret = -EINVAL; + goto out_unlock; + } + ret = kbase_mem_evictable_make(reg->gpu_alloc); + if (ret) + goto out_unlock; + } else { + kbase_mem_evictable_unmake(reg->gpu_alloc); + } + } + + /* limit to imported memory */ + if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) && + (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)) + goto out_unlock; + + /* no change? */ + if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) { + ret = 0; + goto out_unlock; + } + + /* save for roll back */ + prev_flags = reg->flags; + reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH); + reg->flags |= real_flags; + + /* Currently supporting only imported memory */ + switch (reg->gpu_alloc->type) { +#ifdef CONFIG_UMP + case KBASE_MEM_TYPE_IMPORTED_UMP: + ret = kbase_mmu_update_pages(kctx, reg->start_pfn, + kbase_get_gpu_phy_pages(reg), + reg->gpu_alloc->nents, reg->flags); + break; +#endif +#ifdef CONFIG_DMA_SHARED_BUFFER + case KBASE_MEM_TYPE_IMPORTED_UMM: + /* Future use will use the new flags, existing mapping will NOT be updated + * as memory should not be in use by the GPU when updating the flags. + */ + ret = 0; + WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count); + break; +#endif + default: + break; + } + + /* roll back on error, i.e. 
not UMP */ + if (ret) + reg->flags = prev_flags; + +out_unlock: + kbase_gpu_vm_unlock(kctx); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_write(¤t->mm->mmap_lock); +#else + up_write(¤t->mm->mmap_sem); +#endif +out: + return ret; +} + +#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS) + +#ifdef CONFIG_UMP +static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags) +{ + struct kbase_va_region *reg; + ump_dd_handle umph; + u64 block_count; + const ump_dd_physical_block_64 *block_array; + u64 i, j; + int page = 0; + ump_alloc_flags ump_flags; + ump_alloc_flags cpu_flags; + ump_alloc_flags gpu_flags; + + if (*flags & BASE_MEM_SECURE) + goto bad_flags; + + umph = ump_dd_from_secure_id(id); + if (UMP_DD_INVALID_MEMORY_HANDLE == umph) + goto bad_id; + + ump_flags = ump_dd_allocation_flags_get(umph); + cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK; + gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) & + UMP_DEVICE_MASK; + + *va_pages = ump_dd_size_get_64(umph); + *va_pages >>= PAGE_SHIFT; + + if (!*va_pages) + goto bad_size; + + if (*va_pages > (U64_MAX / PAGE_SIZE)) + /* 64-bit address range is the max */ + goto bad_size; + + if (*flags & BASE_MEM_SAME_VA) + reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA); + else + reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA); + + if (!reg) + goto no_region; + + /* we've got pages to map now, and support SAME_VA */ + *flags |= KBASE_MEM_IMPORT_HAVE_PAGES; + + reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP); + if (IS_ERR_OR_NULL(reg->gpu_alloc)) + goto no_alloc_obj; + + reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + + reg->gpu_alloc->imported.ump_handle = umph; + + reg->flags &= ~KBASE_REG_FREE; + reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */ + reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */ + + /* Override import flags based on UMP flags */ + *flags &= ~(BASE_MEM_CACHED_CPU); + *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR); + *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR); + + if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) == + (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) { + reg->flags |= KBASE_REG_CPU_CACHED; + *flags |= BASE_MEM_CACHED_CPU; + } + + if (cpu_flags & UMP_PROT_CPU_WR) { + reg->flags |= KBASE_REG_CPU_WR; + *flags |= BASE_MEM_PROT_CPU_WR; + } + + if (cpu_flags & UMP_PROT_CPU_RD) { + reg->flags |= KBASE_REG_CPU_RD; + *flags |= BASE_MEM_PROT_CPU_RD; + } + + if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) == + (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) + reg->flags |= KBASE_REG_GPU_CACHED; + + if (gpu_flags & UMP_PROT_DEVICE_WR) { + reg->flags |= KBASE_REG_GPU_WR; + *flags |= BASE_MEM_PROT_GPU_WR; + } + + if (gpu_flags & UMP_PROT_DEVICE_RD) { + reg->flags |= KBASE_REG_GPU_RD; + *flags |= BASE_MEM_PROT_GPU_RD; + } + + /* ump phys block query */ + ump_dd_phys_blocks_get_64(umph, &block_count, &block_array); + + for (i = 0; i < block_count; i++) { + for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) { + struct tagged_addr tagged; + + tagged = as_tagged(block_array[i].addr + + (j << PAGE_SHIFT)); + reg->gpu_alloc->pages[page] = tagged; + page++; + } + } + reg->gpu_alloc->nents = *va_pages; + reg->extent = 0; + + return reg; + +no_alloc_obj: + kfree(reg); +no_region: +bad_size: + ump_dd_release(umph); +bad_id: +bad_flags: + return NULL; +} +#endif /* CONFIG_UMP */ + 
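+/*
+ * The import helpers in this file (UMP above, dma-buf/UMM and user buffers
+ * below) all follow the same basic shape; a condensed sketch (illustrative
+ * only, error handling and the type-specific setup omitted):
+ *
+ *   *va_pages = <size of the external buffer, in pages>;
+ *   reg = kbase_alloc_free_region(kctx, 0, *va_pages,
+ *                  shared_zone ? KBASE_REG_ZONE_SAME_VA :
+ *                                KBASE_REG_ZONE_CUSTOM_VA);
+ *   reg->gpu_alloc = kbase_alloc_create(*va_pages, <import type>);
+ *   reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ *   <set reg->flags and reg->gpu_alloc->imported.* for the specific type>;
+ *   return reg;
+ */
+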
+#ifdef CONFIG_DMA_SHARED_BUFFER +static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx, + int fd, u64 *va_pages, u64 *flags, u32 padding) +{ + struct kbase_va_region *reg; + struct dma_buf *dma_buf; + struct dma_buf_attachment *dma_attachment; + bool shared_zone = false; + + dma_buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(dma_buf)) + goto no_buf; + + dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev); + if (!dma_attachment) + goto no_attachment; + + *va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding; + if (!*va_pages) + goto bad_size; + + if (*va_pages > (U64_MAX / PAGE_SIZE)) + /* 64-bit address range is the max */ + goto bad_size; + + /* ignore SAME_VA */ + *flags &= ~BASE_MEM_SAME_VA; + + if (*flags & BASE_MEM_IMPORT_SHARED) + shared_zone = true; + +#ifdef CONFIG_64BIT + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* + * 64-bit tasks require us to reserve VA on the CPU that we use + * on the GPU. + */ + shared_zone = true; + } +#endif + + if (shared_zone) { + *flags |= BASE_MEM_NEED_MMAP; + reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA); + } else { + reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA); + } + + if (!reg) + goto no_region; + + reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM); + if (IS_ERR_OR_NULL(reg->gpu_alloc)) + goto no_alloc_obj; + + reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + + /* No pages to map yet */ + reg->gpu_alloc->nents = 0; + + if (kbase_update_region_flags(kctx, reg, *flags) != 0) + goto invalid_flags; + + reg->flags &= ~KBASE_REG_FREE; + reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */ + reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */ + reg->flags |= KBASE_REG_GPU_CACHED; + + if (*flags & BASE_MEM_SECURE) + reg->flags |= KBASE_REG_SECURE; + + if (padding) + reg->flags |= KBASE_REG_IMPORT_PAD; + + reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM; + reg->gpu_alloc->imported.umm.sgt = NULL; + reg->gpu_alloc->imported.umm.dma_buf = dma_buf; + reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment; + reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0; + reg->extent = 0; + + return reg; + +invalid_flags: + kbase_mem_phy_alloc_put(reg->gpu_alloc); +no_alloc_obj: + kfree(reg); +no_region: +bad_size: + dma_buf_detach(dma_buf, dma_attachment); +no_attachment: + dma_buf_put(dma_buf); +no_buf: + return NULL; +} +#endif /* CONFIG_DMA_SHARED_BUFFER */ + +static u32 kbase_get_cache_line_alignment(struct kbase_context *kctx) +{ + u32 cpu_cache_line_size = cache_line_size(); + u32 gpu_cache_line_size = + (1UL << kctx->kbdev->gpu_props.props.l2_props.log2_line_size); + + return ((cpu_cache_line_size > gpu_cache_line_size) ? 
+ cpu_cache_line_size : + gpu_cache_line_size); +} + +static struct kbase_va_region *kbase_mem_from_user_buffer( + struct kbase_context *kctx, unsigned long address, + unsigned long size, u64 *va_pages, u64 *flags) +{ + long i; + struct kbase_va_region *reg; + long faulted_pages; + int zone = KBASE_REG_ZONE_CUSTOM_VA; + bool shared_zone = false; + u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx); + struct kbase_alloc_import_user_buf *user_buf; + struct page **pages = NULL; + + if ((address & (cache_line_alignment - 1)) != 0 || + (size & (cache_line_alignment - 1)) != 0) { + /* Coherency must be enabled to handle partial cache lines */ + if (*flags & (BASE_MEM_COHERENT_SYSTEM | + BASE_MEM_COHERENT_SYSTEM_REQUIRED)) { + /* Force coherent system required flag, import will + * then fail if coherency isn't available + */ + *flags |= BASE_MEM_COHERENT_SYSTEM_REQUIRED; + } else { + dev_warn(kctx->kbdev->dev, + "User buffer is not cache line aligned and no coherency enabled\n"); + goto bad_size; + } + } + + *va_pages = (PAGE_ALIGN(address + size) >> PAGE_SHIFT) - + PFN_DOWN(address); + if (!*va_pages) + goto bad_size; + + if (*va_pages > (UINT64_MAX / PAGE_SIZE)) + /* 64-bit address range is the max */ + goto bad_size; + + /* SAME_VA generally not supported with imported memory (no known use cases) */ + *flags &= ~BASE_MEM_SAME_VA; + + if (*flags & BASE_MEM_IMPORT_SHARED) + shared_zone = true; + +#ifdef CONFIG_64BIT + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* + * 64-bit tasks require us to reserve VA on the CPU that we use + * on the GPU. + */ + shared_zone = true; + } +#endif + + if (shared_zone) { + *flags |= BASE_MEM_NEED_MMAP; + zone = KBASE_REG_ZONE_SAME_VA; + } + + reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone); + + if (!reg) + goto no_region; + + reg->gpu_alloc = kbase_alloc_create(*va_pages, + KBASE_MEM_TYPE_IMPORTED_USER_BUF); + if (IS_ERR_OR_NULL(reg->gpu_alloc)) + goto no_alloc_obj; + + reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + + if (kbase_update_region_flags(kctx, reg, *flags) != 0) + goto invalid_flags; + + reg->flags &= ~KBASE_REG_FREE; + reg->flags |= KBASE_REG_GPU_NX; /* User-buffers are always No eXecute */ + reg->flags &= ~KBASE_REG_GROWABLE; /* Cannot be grown */ + + user_buf = ®->gpu_alloc->imported.user_buf; + + user_buf->size = size; + user_buf->address = address; + user_buf->nr_pages = *va_pages; + user_buf->mm = current->mm; + user_buf->pages = kmalloc_array(*va_pages, sizeof(struct page *), + GFP_KERNEL); + + if (!user_buf->pages) + goto no_page_array; + + /* If the region is coherent with the CPU then the memory is imported + * and mapped onto the GPU immediately. + * Otherwise get_user_pages is called as a sanity check, but with + * NULL as the pages argument which will fault the pages, but not + * pin them. The memory will then be pinned only around the jobs that + * specify the region as an external resource. 
+ */ + if (reg->flags & KBASE_REG_SHARE_BOTH) { + pages = user_buf->pages; + *flags |= KBASE_MEM_IMPORT_HAVE_PAGES; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_read(¤t->mm->mmap_lock); +#else + down_read(¤t->mm->mmap_sem); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + faulted_pages = get_user_pages(current, current->mm, address, *va_pages, + reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + faulted_pages = get_user_pages(address, *va_pages, + reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); +#else + faulted_pages = get_user_pages(address, *va_pages, + reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0, + pages, NULL); +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_read(¤t->mm->mmap_lock); +#else + up_read(¤t->mm->mmap_sem); +#endif + + if (faulted_pages != *va_pages) + goto fault_mismatch; + + atomic_inc(¤t->mm->mm_count); + + reg->gpu_alloc->nents = 0; + reg->extent = 0; + + if (pages) { + struct device *dev = kctx->kbdev->dev; + unsigned long local_size = user_buf->size; + unsigned long offset = user_buf->address & ~PAGE_MASK; + struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg); + + /* Top bit signifies that this was pinned on import */ + user_buf->current_mapping_usage_count |= PINNED_ON_IMPORT; + + for (i = 0; i < faulted_pages; i++) { + dma_addr_t dma_addr; + unsigned long min; + + min = MIN(PAGE_SIZE - offset, local_size); + dma_addr = dma_map_page(dev, pages[i], + offset, min, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_addr)) + goto unwind_dma_map; + + user_buf->dma_addrs[i] = dma_addr; + pa[i] = as_tagged(page_to_phys(pages[i])); + + local_size -= min; + offset = 0; + } + + reg->gpu_alloc->nents = faulted_pages; + } + + return reg; + +unwind_dma_map: + while (i--) { + dma_unmap_page(kctx->kbdev->dev, + user_buf->dma_addrs[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); + } +fault_mismatch: + if (pages) { + for (i = 0; i < faulted_pages; i++) + put_page(pages[i]); + } + kfree(user_buf->pages); +no_page_array: +invalid_flags: + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); +no_alloc_obj: + kfree(reg); +no_region: +bad_size: + return NULL; + +} + + +u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, + u64 nents, struct base_mem_aliasing_info *ai, + u64 *num_pages) +{ + struct kbase_va_region *reg; + u64 gpu_va; + size_t i; + bool coherent; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(flags); + KBASE_DEBUG_ASSERT(ai); + KBASE_DEBUG_ASSERT(num_pages); + + /* mask to only allowed flags */ + *flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR | + BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL | + BASE_MEM_COHERENT_SYSTEM_REQUIRED); + + if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) { + dev_warn(kctx->kbdev->dev, + "kbase_mem_alias called with bad flags (%llx)", + (unsigned long long)*flags); + goto bad_flags; + } + coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 || + (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0; + + if (!stride) + goto bad_stride; + + if (!nents) + goto bad_nents; + + if ((nents * stride) > (U64_MAX / PAGE_SIZE)) + /* 64-bit address range is the max */ + goto bad_size; + + /* calculate the number of pages this alias will cover */ + *num_pages = nents * stride; + +#ifdef CONFIG_64BIT + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* 64-bit tasks must MMAP anyway, but not expose this address to + * clients */ + *flags |= BASE_MEM_NEED_MMAP; + reg = 
kbase_alloc_free_region(kctx, 0, *num_pages, + KBASE_REG_ZONE_SAME_VA); + } else { +#else + if (1) { +#endif + reg = kbase_alloc_free_region(kctx, 0, *num_pages, + KBASE_REG_ZONE_CUSTOM_VA); + } + + if (!reg) + goto no_reg; + + /* zero-sized page array, as we don't need one/can support one */ + reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS); + if (IS_ERR_OR_NULL(reg->gpu_alloc)) + goto no_alloc_obj; + + reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + + if (kbase_update_region_flags(kctx, reg, *flags) != 0) + goto invalid_flags; + + reg->gpu_alloc->imported.alias.nents = nents; + reg->gpu_alloc->imported.alias.stride = stride; + reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents); + if (!reg->gpu_alloc->imported.alias.aliased) + goto no_aliased_array; + + kbase_gpu_vm_lock(kctx); + + /* validate and add src handles */ + for (i = 0; i < nents; i++) { + if (ai[i].handle.basep.handle < BASE_MEM_FIRST_FREE_ADDRESS) { + if (ai[i].handle.basep.handle != + BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE) + goto bad_handle; /* unsupported magic handle */ + if (!ai[i].length) + goto bad_handle; /* must be > 0 */ + if (ai[i].length > stride) + goto bad_handle; /* can't be larger than the + stride */ + reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; + } else { + struct kbase_va_region *aliasing_reg; + struct kbase_mem_phy_alloc *alloc; + + aliasing_reg = kbase_region_tracker_find_region_base_address( + kctx, + (ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT); + + /* validate found region */ + if (!aliasing_reg) + goto bad_handle; /* Not found */ + if (aliasing_reg->flags & KBASE_REG_FREE) + goto bad_handle; /* Free region */ + if (aliasing_reg->flags & KBASE_REG_DONT_NEED) + goto bad_handle; /* Ephemeral region */ + if (!aliasing_reg->gpu_alloc) + goto bad_handle; /* No alloc */ + if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) + goto bad_handle; /* Not a native alloc */ + if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0)) + goto bad_handle; + /* Non-coherent memory cannot alias + coherent memory, and vice versa.*/ + + /* check size against stride */ + if (!ai[i].length) + goto bad_handle; /* must be > 0 */ + if (ai[i].length > stride) + goto bad_handle; /* can't be larger than the + stride */ + + alloc = aliasing_reg->gpu_alloc; + + /* check against the alloc's size */ + if (ai[i].offset > alloc->nents) + goto bad_handle; /* beyond end */ + if (ai[i].offset + ai[i].length > alloc->nents) + goto bad_handle; /* beyond end */ + + reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc); + reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; + reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset; + } + } + +#ifdef CONFIG_64BIT + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) { + /* Bind to a cookie */ + if (!kctx->cookies) { + dev_err(kctx->kbdev->dev, "No cookies available for allocation!"); + goto no_cookie; + } + /* return a cookie */ + gpu_va = __ffs(kctx->cookies); + kctx->cookies &= ~(1UL << gpu_va); + BUG_ON(kctx->pending_regions[gpu_va]); + kctx->pending_regions[gpu_va] = reg; + + /* relocate to correct base */ + gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE); + gpu_va <<= PAGE_SHIFT; + } else /* we control the VA */ { +#else + if (1) { +#endif + if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) { + dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU"); + goto no_mmap; + } + /* return real GPU VA */ + gpu_va = reg->start_pfn << 
PAGE_SHIFT; + } + + reg->flags &= ~KBASE_REG_FREE; + reg->flags &= ~KBASE_REG_GROWABLE; + + kbase_gpu_vm_unlock(kctx); + + return gpu_va; + +#ifdef CONFIG_64BIT +no_cookie: +#endif +no_mmap: +bad_handle: + kbase_gpu_vm_unlock(kctx); +no_aliased_array: +invalid_flags: + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); +no_alloc_obj: + kfree(reg); +no_reg: +bad_size: +bad_nents: +bad_stride: +bad_flags: + return 0; +} + +int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, + void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, + u64 *flags) +{ + struct kbase_va_region *reg; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(gpu_va); + KBASE_DEBUG_ASSERT(va_pages); + KBASE_DEBUG_ASSERT(flags); + +#ifdef CONFIG_64BIT + if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) + *flags |= BASE_MEM_SAME_VA; +#endif + + if (!kbase_check_import_flags(*flags)) { + dev_warn(kctx->kbdev->dev, + "kbase_mem_import called with bad flags (%llx)", + (unsigned long long)*flags); + goto bad_flags; + } + + if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 && + !kbase_device_is_cpu_coherent(kctx->kbdev)) { + dev_warn(kctx->kbdev->dev, + "kbase_mem_import call required coherent mem when unavailable"); + goto bad_flags; + } + if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 && + !kbase_device_is_cpu_coherent(kctx->kbdev)) { + /* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */ + *flags &= ~BASE_MEM_COHERENT_SYSTEM; + } + + if ((padding != 0) && (type != BASE_MEM_IMPORT_TYPE_UMM)) { + dev_warn(kctx->kbdev->dev, + "padding is only supported for UMM"); + goto bad_flags; + } + + switch (type) { +#ifdef CONFIG_UMP + case BASE_MEM_IMPORT_TYPE_UMP: { + ump_secure_id id; + + if (get_user(id, (ump_secure_id __user *)phandle)) + reg = NULL; + else + reg = kbase_mem_from_ump(kctx, id, va_pages, flags); + } + break; +#endif /* CONFIG_UMP */ +#ifdef CONFIG_DMA_SHARED_BUFFER + case BASE_MEM_IMPORT_TYPE_UMM: { + int fd; + + if (get_user(fd, (int __user *)phandle)) + reg = NULL; + else + reg = kbase_mem_from_umm(kctx, fd, va_pages, flags, + padding); + } + break; +#endif /* CONFIG_DMA_SHARED_BUFFER */ + case BASE_MEM_IMPORT_TYPE_USER_BUFFER: { + struct base_mem_import_user_buffer user_buffer; + void __user *uptr; + + if (copy_from_user(&user_buffer, phandle, + sizeof(user_buffer))) { + reg = NULL; + } else { +#ifdef CONFIG_COMPAT + if (kbase_ctx_flag(kctx, KCTX_COMPAT)) + uptr = compat_ptr(user_buffer.ptr); + else +#endif + uptr = u64_to_user_ptr(user_buffer.ptr); + + reg = kbase_mem_from_user_buffer(kctx, + (unsigned long)uptr, user_buffer.length, + va_pages, flags); + } + break; + } + default: { + reg = NULL; + break; + } + } + + if (!reg) + goto no_reg; + + kbase_gpu_vm_lock(kctx); + + /* mmap needed to setup VA? 
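+	 * Three cases follow: SAME_VA/NEED_MMAP imports are parked behind a
+	 * cookie for a later mmap() from userspace; imports that already have
+	 * backing pages (KBASE_MEM_IMPORT_HAVE_PAGES) are mapped on the GPU
+	 * immediately; otherwise only the GPU VA range is reserved for now.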
*/
+	if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
+		/* Bind to a cookie */
+		if (!kctx->cookies)
+			goto no_cookie;
+		/* return a cookie */
+		*gpu_va = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << *gpu_va);
+		BUG_ON(kctx->pending_regions[*gpu_va]);
+		kctx->pending_regions[*gpu_va] = reg;
+
+		/* relocate to correct base */
+		*gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		*gpu_va <<= PAGE_SHIFT;
+
+	} else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES) {
+		/* we control the VA, mmap now to the GPU */
+		if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	} else {
+		/* we control the VA, but nothing to mmap yet */
+		if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	/* clear out private flags */
+	*flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return 0;
+
+no_gpu_va:
+no_cookie:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kfree(reg);
+no_reg:
+bad_flags:
+	*gpu_va = 0;
+	*va_pages = 0;
+	*flags = 0;
+	return -ENOMEM;
+}
+
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	struct tagged_addr *phy_pages;
+	u64 delta = new_pages - old_pages;
+	int ret = 0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Map the new pages into the GPU */
+	phy_pages = kbase_get_gpu_phy_pages(reg);
+	ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
+			phy_pages + old_pages, delta, reg->flags);
+
+	return ret;
+}
+
+static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 gpu_va_start = reg->start_pfn;
+
+	if (new_pages == old_pages)
+		/* Nothing to do */
+		return;
+
+	unmap_mapping_range(kctx->filp->f_inode->i_mapping,
+			(gpu_va_start + new_pages)<<PAGE_SHIFT,
+			(old_pages - new_pages)<<PAGE_SHIFT, 1);
+}
+
+static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 delta = old_pages - new_pages;
+	int ret = 0;
+
+	ret = kbase_mmu_teardown_pages(kctx,
+			reg->start_pfn + new_pages, delta);
+
+	return ret;
+}
+
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
+{
+	u64 old_pages;
+	u64 delta;
+	int res = -EINVAL;
+	struct kbase_va_region *reg;
+	bool read_locked = false;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(gpu_addr != 0);
+
+	if (gpu_addr & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev, "kbase:mem_commit: gpu_addr: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+	down_write(&current->mm->mmap_lock);
+#else
+	down_write(&current->mm->mmap_sem);
+#endif
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (!reg || (reg->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+		goto out_unlock;
+
+	if (0 == (reg->flags & KBASE_REG_GROWABLE))
+		goto out_unlock;
+
+	/* Would overflow the VA region */
+	if (new_pages > reg->nr_pages)
+		goto out_unlock;
+
+	/* can't be mapped more than once on the GPU */
+	if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
+		goto out_unlock;
+	/* can't grow regions which are ephemeral */
+	if (reg->flags & KBASE_REG_DONT_NEED)
+		goto out_unlock;
+
+	if (new_pages == reg->gpu_alloc->nents) {
+		/* no change */
+		res = 0;
+		goto out_unlock;
+	}
+
+	old_pages = kbase_reg_current_backed_size(reg);
+	if (new_pages > old_pages) {
+		delta = new_pages - old_pages;
+
+		/*
+		 * No update to the mm so downgrade the writer lock to a read
+		 * lock so other readers aren't blocked after this point.
+		 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+		downgrade_write(&current->mm->mmap_lock);
+#else
+		downgrade_write(&current->mm->mmap_sem);
+#endif
+		read_locked = true;
+
+		/* Allocate some more pages */
+		if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+		if (reg->cpu_alloc != reg->gpu_alloc) {
+			if (kbase_alloc_phy_pages_helper(
+					reg->gpu_alloc, delta) != 0) {
+				res = -ENOMEM;
+				kbase_free_phy_pages_helper(reg->cpu_alloc,
+						delta);
+				goto out_unlock;
+			}
+		}
+
+		/* No update required for CPU mappings, that's done on fault. */
+
+		/* Update GPU mapping. */
+		res = kbase_mem_grow_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* On error free the new pages */
+		if (res) {
+			kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				kbase_free_phy_pages_helper(reg->gpu_alloc,
+						delta);
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+	} else {
+		delta = old_pages - new_pages;
+
+		/* Update all CPU mapping(s) */
+		kbase_mem_shrink_cpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* Update the GPU mapping */
+		res = kbase_mem_shrink_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+		if (res) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+
+		kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+		if (reg->cpu_alloc != reg->gpu_alloc)
+			kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	if (read_locked)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+		up_read(&current->mm->mmap_lock);
+#else
+		up_read(&current->mm->mmap_sem);
+#endif
+	else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+		up_write(&current->mm->mmap_lock);
+#else
+		up_write(&current->mm->mmap_sem);
+#endif
+
+	return res;
+}
+
+static void kbase_cpu_vm_open(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+	/* non-atomic as we're under Linux' mm lock */
+	map->count++;
+}
+
+static void kbase_cpu_vm_close(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+
+	/* non-atomic as we're under Linux' mm lock */
+	if (--map->count)
+		return;
+
+	KBASE_DEBUG_ASSERT(map->kctx);
+	KBASE_DEBUG_ASSERT(map->alloc);
+
+	kbase_gpu_vm_lock(map->kctx);
+
+	if (map->free_on_close) {
+		KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
+				KBASE_REG_ZONE_SAME_VA);
+		/* Avoid freeing memory on the process death which results in
+		 * GPU Page Fault.
Memory will be freed in kbase_destroy_context + */ + if (!(current->flags & PF_EXITING)) + kbase_mem_free_region(map->kctx, map->region); + } + + list_del(&map->mappings_list); + + kbase_gpu_vm_unlock(map->kctx); + + kbase_mem_phy_alloc_put(map->alloc); + kfree(map); +} + +KBASE_EXPORT_TEST_API(kbase_cpu_vm_close); + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ +#else +static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; +#endif + struct kbase_cpu_mapping *map = vma->vm_private_data; + pgoff_t rel_pgoff; + size_t i; + pgoff_t addr; + + KBASE_DEBUG_ASSERT(map); + KBASE_DEBUG_ASSERT(map->count > 0); + KBASE_DEBUG_ASSERT(map->kctx); + KBASE_DEBUG_ASSERT(map->alloc); + + rel_pgoff = vmf->pgoff - map->region->start_pfn; + + kbase_gpu_vm_lock(map->kctx); + if (rel_pgoff >= map->alloc->nents) + goto locked_bad_fault; + + /* Fault on access to DONT_NEED regions */ + if (map->alloc->reg && (map->alloc->reg->flags & KBASE_REG_DONT_NEED)) + goto locked_bad_fault; + + /* insert all valid pages from the fault location */ + i = rel_pgoff; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + addr = (pgoff_t)((uintptr_t)vmf->virtual_address >> PAGE_SHIFT); +#else + addr = (pgoff_t)(vmf->address >> PAGE_SHIFT); +#endif + while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) { + int ret = vm_insert_pfn(vma, addr << PAGE_SHIFT, + PFN_DOWN(as_phys_addr_t(map->alloc->pages[i]))); + if (ret < 0 && ret != -EBUSY) + goto locked_bad_fault; + + i++; addr++; + } + + kbase_gpu_vm_unlock(map->kctx); + /* we resolved it, nothing for VM to do */ + return VM_FAULT_NOPAGE; + +locked_bad_fault: + kbase_gpu_vm_unlock(map->kctx); + return VM_FAULT_SIGBUS; +} + +const struct vm_operations_struct kbase_vm_ops = { + .open = kbase_cpu_vm_open, + .close = kbase_cpu_vm_close, + .fault = kbase_cpu_vm_fault +}; + +static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close) +{ + struct kbase_cpu_mapping *map; + struct tagged_addr *page_array; + int err = 0; + int i; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + + if (!map) { + WARN_ON(1); + err = -ENOMEM; + goto out; + } + + /* + * VM_DONTCOPY - don't make this mapping available in fork'ed processes + * VM_DONTEXPAND - disable mremap on this region + * VM_IO - disables paging + * VM_DONTDUMP - Don't include in core dumps (3.7 only) + * VM_MIXEDMAP - Support mixing struct page*s and raw pfns. + * This is needed to support using the dedicated and + * the OS based memory backends together. + */ + /* + * This will need updating to propagate coherency flags + * See MIDBASE-1057 + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO; +#else + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO; +#endif + vma->vm_ops = &kbase_vm_ops; + vma->vm_private_data = map; + + page_array = kbase_get_cpu_phy_pages(reg); + + if (!(reg->flags & KBASE_REG_CPU_CACHED) && + (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) { + /* We can't map vmalloc'd memory uncached. + * Other memory will have been returned from + * kbase_mem_pool which would be + * suitable for mapping uncached. 
+ */ + BUG_ON(kaddr); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + } + + if (!kaddr) { + unsigned long addr = vma->vm_start + aligned_offset; + u64 start_off = vma->vm_pgoff - reg->start_pfn + + (aligned_offset>>PAGE_SHIFT); + + vma->vm_flags |= VM_PFNMAP; + for (i = 0; i < nr_pages; i++) { + phys_addr_t phys; + + phys = as_phys_addr_t(page_array[i + start_off]); + err = vm_insert_pfn(vma, addr, PFN_DOWN(phys)); + if (WARN_ON(err)) + break; + + addr += PAGE_SIZE; + } + } else { + WARN_ON(aligned_offset); + /* MIXEDMAP so we can vfree the kaddr early and not track it after map time */ + vma->vm_flags |= VM_MIXEDMAP; + /* vmalloc remaping is easy... */ + err = remap_vmalloc_range(vma, kaddr, 0); + WARN_ON(err); + } + + if (err) { + kfree(map); + goto out; + } + + map->region = reg; + map->free_on_close = free_on_close; + map->kctx = reg->kctx; + map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); + map->count = 1; /* start with one ref */ + + if (reg->flags & KBASE_REG_CPU_CACHED) + map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED; + + list_add(&map->mappings_list, &map->alloc->mappings); + + out: + return err; +} + +static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr) +{ + struct kbase_va_region *new_reg; + u32 nr_pages; + size_t size; + int err = 0; + u32 *tb; + int owns_tb = 1; + + dev_dbg(kctx->kbdev->dev, "in %s\n", __func__); + size = (vma->vm_end - vma->vm_start); + nr_pages = size >> PAGE_SHIFT; + + if (!kctx->jctx.tb) { + KBASE_DEBUG_ASSERT(0 != size); + tb = vmalloc_user(size); + + if (NULL == tb) { + err = -ENOMEM; + goto out; + } + + err = kbase_device_trace_buffer_install(kctx, tb, size); + if (err) { + vfree(tb); + goto out; + } + } else { + err = -EINVAL; + goto out; + } + + *kaddr = kctx->jctx.tb; + + new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA); + if (!new_reg) { + err = -ENOMEM; + WARN_ON(1); + goto out_no_region; + } + + new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB); + if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) { + err = -ENOMEM; + new_reg->cpu_alloc = NULL; + WARN_ON(1); + goto out_no_alloc; + } + + new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc); + + new_reg->cpu_alloc->imported.kctx = kctx; + new_reg->flags &= ~KBASE_REG_FREE; + new_reg->flags |= KBASE_REG_CPU_CACHED; + + /* alloc now owns the tb */ + owns_tb = 0; + + if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) { + err = -ENOMEM; + WARN_ON(1); + goto out_no_va_region; + } + + *reg = new_reg; + + /* map read only, noexec */ + vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC); + /* the rest of the flags is added by the cpu_mmap handler */ + + dev_dbg(kctx->kbdev->dev, "%s done\n", __func__); + return 0; + +out_no_va_region: +out_no_alloc: + kbase_free_alloced_region(new_reg); +out_no_region: + if (owns_tb) { + kbase_device_trace_buffer_uninstall(kctx); + vfree(tb); + } +out: + return err; +} + +static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr) +{ + struct kbase_va_region *new_reg; + void *kaddr; + u32 nr_pages; + size_t size; + int err = 0; + + dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n"); + size = (vma->vm_end - vma->vm_start); + nr_pages = size >> PAGE_SHIFT; + + kaddr = kbase_mmu_dump(kctx, nr_pages); + + if (!kaddr) { + err = -ENOMEM; + goto out; + } + + new_reg = 
kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA); + if (!new_reg) { + err = -ENOMEM; + WARN_ON(1); + goto out; + } + + new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW); + if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) { + err = -ENOMEM; + new_reg->cpu_alloc = NULL; + WARN_ON(1); + goto out_no_alloc; + } + + new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc); + + new_reg->flags &= ~KBASE_REG_FREE; + new_reg->flags |= KBASE_REG_CPU_CACHED; + if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) { + err = -ENOMEM; + WARN_ON(1); + goto out_va_region; + } + + *kmap_addr = kaddr; + *reg = new_reg; + + dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n"); + return 0; + +out_no_alloc: +out_va_region: + kbase_free_alloced_region(new_reg); +out: + return err; +} + + +void kbase_os_mem_map_lock(struct kbase_context *kctx) +{ + struct mm_struct *mm = current->mm; + (void)kctx; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + down_read(&mm->mmap_lock); +#else + down_read(&mm->mmap_sem); +#endif +} + +void kbase_os_mem_map_unlock(struct kbase_context *kctx) +{ + struct mm_struct *mm = current->mm; + (void)kctx; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) + up_read(&mm->mmap_lock); +#else + up_read(&mm->mmap_sem); +#endif +} + +static int kbasep_reg_mmap(struct kbase_context *kctx, + struct vm_area_struct *vma, + struct kbase_va_region **regm, + size_t *nr_pages, size_t *aligned_offset) + +{ + int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE); + struct kbase_va_region *reg; + int err = 0; + + *aligned_offset = 0; + + dev_dbg(kctx->kbdev->dev, "in kbasep_reg_mmap\n"); + + /* SAME_VA stuff, fetch the right region */ + reg = kctx->pending_regions[cookie]; + if (!reg) { + err = -ENOMEM; + goto out; + } + + if ((reg->flags & KBASE_REG_GPU_NX) && (reg->nr_pages != *nr_pages)) { + /* incorrect mmap size */ + /* leave the cookie for a potential later + * mapping, or to be reclaimed later when the + * context is freed */ + err = -ENOMEM; + goto out; + } + + if ((vma->vm_flags & VM_READ && !(reg->flags & KBASE_REG_CPU_RD)) || + (vma->vm_flags & VM_WRITE && !(reg->flags & KBASE_REG_CPU_WR))) { + /* VM flags inconsistent with region flags */ + err = -EPERM; + dev_err(kctx->kbdev->dev, "%s:%d inconsistent VM flags\n", + __FILE__, __LINE__); + goto out; + } + + /* adjust down nr_pages to what we have physically */ + *nr_pages = kbase_reg_current_backed_size(reg); + + if (kbase_gpu_mmap(kctx, reg, vma->vm_start + *aligned_offset, + reg->nr_pages, 1) != 0) { + dev_err(kctx->kbdev->dev, "%s:%d\n", __FILE__, __LINE__); + /* Unable to map in GPU space. */ + WARN_ON(1); + err = -ENOMEM; + goto out; + } + /* no need for the cookie anymore */ + kctx->pending_regions[cookie] = NULL; + kctx->cookies |= (1UL << cookie); + + /* + * Overwrite the offset with the region start_pfn, so we effectively + * map from offset 0 in the region. 
However subtract the aligned
+	 * offset so that when user space trims the mapping the beginning of
+	 * the trimmed VMA has the correct vm_pgoff;
+	 */
+	vma->vm_pgoff = reg->start_pfn - ((*aligned_offset)>>PAGE_SHIFT);
+out:
+	*regm = reg;
+	dev_dbg(kctx->kbdev->dev, "kbasep_reg_mmap done\n");
+
+	return err;
+}
+
+int kbase_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct kbase_context *kctx = file->private_data;
+	struct kbase_va_region *reg = NULL;
+	void *kaddr = NULL;
+	size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int err = 0;
+	int free_on_close = 0;
+	struct device *dev = kctx->kbdev->dev;
+	size_t aligned_offset = 0;
+
+	dev_dbg(dev, "kbase_mmap\n");
+
+	/* strip away corresponding VM_MAY% flags to the VM_% flags requested */
+	vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
+
+	if (0 == nr_pages) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
+		/* The non-mapped tracking helper page */
+		err = kbase_tracking_page_setup(kctx, vma);
+		goto out_unlock;
+	}
+
+	/* if not the MTP, verify that the MTP has been mapped */
+	rcu_read_lock();
+	/* catches both when the special page isn't present or
+	 * when we've forked */
+	if (rcu_dereference(kctx->process_mm) != current->mm) {
+		err = -EINVAL;
+		rcu_read_unlock();
+		goto out_unlock;
+	}
+	rcu_read_unlock();
+
+	switch (vma->vm_pgoff) {
+	case PFN_DOWN(BASEP_MEM_INVALID_HANDLE):
+	case PFN_DOWN(BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE):
+		/* Illegal handle for direct map */
+		err = -EINVAL;
+		goto out_unlock;
+	case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
+		err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
+		if (0 != err)
+			goto out_unlock;
+		dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+		/* MMU dump */
+		err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
+	     PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
+		err = kbasep_reg_mmap(kctx, vma, &reg, &nr_pages,
+				&aligned_offset);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	}
+	default: {
+		reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+				(u64)vma->vm_pgoff << PAGE_SHIFT);
+
+		if (reg && !(reg->flags & KBASE_REG_FREE)) {
+			/* will this mapping overflow the size of the region?
*/ + if (nr_pages > (reg->nr_pages - + (vma->vm_pgoff - reg->start_pfn))) { + err = -ENOMEM; + goto out_unlock; + } + + if ((vma->vm_flags & VM_READ && + !(reg->flags & KBASE_REG_CPU_RD)) || + (vma->vm_flags & VM_WRITE && + !(reg->flags & KBASE_REG_CPU_WR))) { + /* VM flags inconsistent with region flags */ + err = -EPERM; + dev_err(dev, "%s:%d inconsistent VM flags\n", + __FILE__, __LINE__); + goto out_unlock; + } + +#ifdef CONFIG_DMA_SHARED_BUFFER + if (KBASE_MEM_TYPE_IMPORTED_UMM == + reg->cpu_alloc->type) { + err = dma_buf_mmap( + reg->cpu_alloc->imported.umm.dma_buf, + vma, vma->vm_pgoff - reg->start_pfn); + goto out_unlock; + } +#endif /* CONFIG_DMA_SHARED_BUFFER */ + + /* limit what we map to the amount currently backed */ + if (reg->cpu_alloc->nents < (vma->vm_pgoff - reg->start_pfn + nr_pages)) { + if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents) + nr_pages = 0; + else + nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn); + } + } else { + err = -ENOMEM; + goto out_unlock; + } + } /* default */ + } /* switch */ + + err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close); + + if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) { + /* MMU dump - userspace should now have a reference on + * the pages, so we can now free the kernel mapping */ + vfree(kaddr); + } + +out_unlock: + kbase_gpu_vm_unlock(kctx); +out: + if (err) + dev_err(dev, "mmap failed %d\n", err); + + return err; +} + +KBASE_EXPORT_TEST_API(kbase_mmap); + +static void kbasep_sync_mem_regions(struct kbase_context *kctx, + struct kbase_vmap_struct *map, enum kbase_sync_type dest) +{ + size_t i; + off_t const offset = (uintptr_t)map->gpu_addr & ~PAGE_MASK; + size_t const page_count = PFN_UP(offset + map->size); + + /* Sync first page */ + size_t sz = MIN(((size_t) PAGE_SIZE - offset), map->size); + struct tagged_addr cpu_pa = map->cpu_pages[0]; + struct tagged_addr gpu_pa = map->gpu_pages[0]; + + kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, dest); + + /* Sync middle pages (if any) */ + for (i = 1; page_count > 2 && i < page_count - 1; i++) { + cpu_pa = map->cpu_pages[i]; + gpu_pa = map->gpu_pages[i]; + kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, dest); + } + + /* Sync last page (if any) */ + if (page_count > 1) { + cpu_pa = map->cpu_pages[page_count - 1]; + gpu_pa = map->gpu_pages[page_count - 1]; + sz = ((offset + map->size - 1) & ~PAGE_MASK) + 1; + kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, dest); + } +} + +void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size, + unsigned long prot_request, struct kbase_vmap_struct *map) +{ + struct kbase_va_region *reg; + unsigned long page_index; + unsigned int offset = gpu_addr & ~PAGE_MASK; + size_t page_count = PFN_UP(offset + size); + struct tagged_addr *page_array; + struct page **pages; + void *cpu_addr = NULL; + pgprot_t prot; + size_t i; + + if (!size || !map) + return NULL; + + /* check if page_count calculation will wrap */ + if (size > ((size_t)-1 / PAGE_SIZE)) + return NULL; + + kbase_gpu_vm_lock(kctx); + + reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr); + if (!reg || (reg->flags & KBASE_REG_FREE)) + goto out_unlock; + + page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn; + + /* check if page_index + page_count will wrap */ + if (-1UL - page_count < page_index) + goto out_unlock; + + if (page_index + page_count > kbase_reg_current_backed_size(reg)) + goto out_unlock; + + if (reg->flags & KBASE_REG_DONT_NEED) + goto out_unlock; + + /* check 
access permissions can be satisfied + * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR} */ + if ((reg->flags & prot_request) != prot_request) + goto out_unlock; + + page_array = kbase_get_cpu_phy_pages(reg); + if (!page_array) + goto out_unlock; + + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto out_unlock; + + for (i = 0; i < page_count; i++) + pages[i] = phys_to_page(as_phys_addr_t(page_array[page_index + + i])); + + prot = PAGE_KERNEL; + if (!(reg->flags & KBASE_REG_CPU_CACHED)) { + /* Map uncached */ + prot = pgprot_writecombine(prot); + } + /* Note: enforcing a RO prot_request onto prot is not done, since: + * - CPU-arch-specific integration required + * - kbase_vmap() requires no access checks to be made/enforced */ + + cpu_addr = vmap(pages, page_count, VM_MAP, prot); + + kfree(pages); + + if (!cpu_addr) + goto out_unlock; + + map->gpu_addr = gpu_addr; + map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); + map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index]; + map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index]; + map->addr = (void *)((uintptr_t)cpu_addr + offset); + map->size = size; + map->sync_needed = ((reg->flags & KBASE_REG_CPU_CACHED) != 0) && + !kbase_mem_is_imported(map->gpu_alloc->type); + + if (map->sync_needed) + kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU); + kbase_gpu_vm_unlock(kctx); + + return map->addr; + +out_unlock: + kbase_gpu_vm_unlock(kctx); + return NULL; +} + +void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size, + struct kbase_vmap_struct *map) +{ + /* 0 is specified for prot_request to indicate no access checks should + * be made. + * + * As mentioned in kbase_vmap_prot() this means that a kernel-side + * CPU-RO mapping is not enforced to allow this to work */ + return kbase_vmap_prot(kctx, gpu_addr, size, 0u, map); +} +KBASE_EXPORT_TEST_API(kbase_vmap); + +void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map) +{ + void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK); + vunmap(addr); + + if (map->sync_needed) + kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE); + map->gpu_addr = 0; + map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc); + map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc); + map->cpu_pages = NULL; + map->gpu_pages = NULL; + map->addr = NULL; + map->size = 0; + map->sync_needed = false; +} +KBASE_EXPORT_TEST_API(kbase_vunmap); + +void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages) +{ + struct mm_struct *mm; + + rcu_read_lock(); + mm = rcu_dereference(kctx->process_mm); + if (mm) { + atomic_add(pages, &kctx->nonmapped_pages); +#ifdef SPLIT_RSS_COUNTING + add_mm_counter(mm, MM_FILEPAGES, pages); +#else + spin_lock(&mm->page_table_lock); + add_mm_counter(mm, MM_FILEPAGES, pages); + spin_unlock(&mm->page_table_lock); +#endif + } + rcu_read_unlock(); +} + +static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx) +{ + int pages; + struct mm_struct *mm; + + spin_lock(&kctx->mm_update_lock); + mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock)); + if (!mm) { + spin_unlock(&kctx->mm_update_lock); + return; + } + + rcu_assign_pointer(kctx->process_mm, NULL); + spin_unlock(&kctx->mm_update_lock); + synchronize_rcu(); + + pages = atomic_xchg(&kctx->nonmapped_pages, 0); +#ifdef SPLIT_RSS_COUNTING + add_mm_counter(mm, MM_FILEPAGES, -pages); +#else + 
spin_lock(&mm->page_table_lock); + add_mm_counter(mm, MM_FILEPAGES, -pages); + spin_unlock(&mm->page_table_lock); +#endif +} + +static void kbase_special_vm_close(struct vm_area_struct *vma) +{ + struct kbase_context *kctx; + + kctx = vma->vm_private_data; + kbasep_os_process_page_usage_drain(kctx); +} + +static const struct vm_operations_struct kbase_vm_special_ops = { + .close = kbase_special_vm_close, +}; + +static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma) +{ + /* check that this is the only tracking page */ + spin_lock(&kctx->mm_update_lock); + if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) { + spin_unlock(&kctx->mm_update_lock); + return -EFAULT; + } + + rcu_assign_pointer(kctx->process_mm, current->mm); + + spin_unlock(&kctx->mm_update_lock); + + /* no real access */ + vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO; +#else + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO; +#endif + vma->vm_ops = &kbase_vm_special_ops; + vma->vm_private_data = kctx; + + return 0; +} +void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle) +{ + int res; + void *va; + dma_addr_t dma_pa; + struct kbase_va_region *reg; + struct tagged_addr *page_array; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + unsigned long attrs = DMA_ATTR_WRITE_COMBINE; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + DEFINE_DMA_ATTRS(attrs); +#endif + + u32 pages = ((size - 1) >> PAGE_SHIFT) + 1; + u32 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR | + BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR; + u32 i; + + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(0 != size); + KBASE_DEBUG_ASSERT(0 != pages); + + if (size == 0) + goto err; + + /* All the alloc calls return zeroed memory */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, + attrs); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, + &attrs); +#else + va = dma_alloc_writecombine(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL); +#endif + if (!va) + goto err; + + /* Store the state so we can free it later. 
*/ + handle->cpu_va = va; + handle->dma_pa = dma_pa; + handle->size = size; + + + reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA); + if (!reg) + goto no_reg; + + reg->flags &= ~KBASE_REG_FREE; + if (kbase_update_region_flags(kctx, reg, flags) != 0) + goto invalid_flags; + + reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW); + if (IS_ERR_OR_NULL(reg->cpu_alloc)) + goto no_alloc; + + reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc); + + page_array = kbase_get_cpu_phy_pages(reg); + + for (i = 0; i < pages; i++) + page_array[i] = as_tagged(dma_pa + ((dma_addr_t)i << PAGE_SHIFT)); + + reg->cpu_alloc->nents = pages; + + kbase_gpu_vm_lock(kctx); + res = kbase_gpu_mmap(kctx, reg, (uintptr_t) va, pages, 1); + kbase_gpu_vm_unlock(kctx); + if (res) + goto no_mmap; + + return va; + +no_mmap: + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); +no_alloc: +invalid_flags: + kfree(reg); +no_reg: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, attrs); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs); +#else + dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa); +#endif +err: + return NULL; +} +KBASE_EXPORT_SYMBOL(kbase_va_alloc); + +void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle) +{ + struct kbase_va_region *reg; + int err; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + DEFINE_DMA_ATTRS(attrs); +#endif + + KBASE_DEBUG_ASSERT(kctx != NULL); + KBASE_DEBUG_ASSERT(handle->cpu_va != NULL); + + kbase_gpu_vm_lock(kctx); + reg = kbase_region_tracker_find_region_base_address(kctx, (uintptr_t)handle->cpu_va); + KBASE_DEBUG_ASSERT(reg); + err = kbase_gpu_munmap(kctx, reg); + kbase_gpu_vm_unlock(kctx); + KBASE_DEBUG_ASSERT(!err); + + kbase_mem_phy_alloc_put(reg->cpu_alloc); + kbase_mem_phy_alloc_put(reg->gpu_alloc); + kfree(reg); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + dma_free_attrs(kctx->kbdev->dev, handle->size, + handle->cpu_va, handle->dma_pa, DMA_ATTR_WRITE_COMBINE); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + dma_free_attrs(kctx->kbdev->dev, handle->size, + handle->cpu_va, handle->dma_pa, &attrs); +#else + dma_free_writecombine(kctx->kbdev->dev, handle->size, + handle->cpu_va, handle->dma_pa); +#endif +} +KBASE_EXPORT_SYMBOL(kbase_va_free); + diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h new file mode 100644 index 00000000000000..db35f62a7431fc --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h @@ -0,0 +1,240 @@ +/* + * + * (C) COPYRIGHT 2010, 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_mem_linux.h + * Base kernel memory APIs, Linux implementation. 
+ */ + +#ifndef _KBASE_MEM_LINUX_H_ +#define _KBASE_MEM_LINUX_H_ + +/** A HWC dump mapping */ +struct kbase_hwc_dma_mapping { + void *cpu_va; + dma_addr_t dma_pa; + size_t size; +}; + +struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, + u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, + u64 *gpu_va); +int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages); +int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, + void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, + u64 *flags); +u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages); +int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask); + +/** + * kbase_mem_commit - Change the physical backing size of a region + * + * @kctx: The kernel context + * @gpu_addr: Handle to the memory region + * @new_pages: Number of physical pages to back the region with + * + * Return: 0 on success or error code + */ +int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages); + +int kbase_mmap(struct file *file, struct vm_area_struct *vma); + +/** + * kbase_mem_evictable_init - Initialize the Ephemeral memory the eviction + * mechanism. + * @kctx: The kbase context to initialize. + * + * Return: Zero on success or -errno on failure. + */ +int kbase_mem_evictable_init(struct kbase_context *kctx); + +/** + * kbase_mem_evictable_deinit - De-initialize the Ephemeral memory eviction + * mechanism. + * @kctx: The kbase context to de-initialize. + */ +void kbase_mem_evictable_deinit(struct kbase_context *kctx); + +/** + * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation + * @kctx: Context the region belongs to + * @reg: The GPU region + * @new_pages: The number of pages after the grow + * @old_pages: The number of pages before the grow + * + * Return: 0 on success, -errno on error. + * + * Expand the GPU mapping to encompass the new psychical pages which have + * been added to the allocation. + * + * Note: Caller must be holding the region lock. + */ +int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, + struct kbase_va_region *reg, + u64 new_pages, u64 old_pages); + +/** + * kbase_mem_evictable_make - Make a physical allocation eligible for eviction + * @gpu_alloc: The physical allocation to make evictable + * + * Return: 0 on success, -errno on error. + * + * Take the provided region and make all the physical pages within it + * reclaimable by the kernel, updating the per-process VM stats as well. + * Remove any CPU mappings (as these can't be removed in the shrinker callback + * as mmap_sem might already be taken) but leave the GPU mapping intact as + * and until the shrinker reclaims the allocation. + * + * Note: Must be called with the region lock of the containing context. + */ +int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc); + +/** + * kbase_mem_evictable_unmake - Remove a physical allocations eligibility for + * eviction. + * @alloc: The physical allocation to remove eviction eligibility from. + * + * Return: True if the allocation had its backing restored and false if + * it hasn't. + * + * Make the physical pages in the region no longer reclaimable and update the + * per-process stats, if the shrinker has already evicted the memory then + * re-allocate it if the region is still alive. + * + * Note: Must be called with the region lock of the containing context. 
+ */ +bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc); + +struct kbase_vmap_struct { + u64 gpu_addr; + struct kbase_mem_phy_alloc *cpu_alloc; + struct kbase_mem_phy_alloc *gpu_alloc; + struct tagged_addr *cpu_pages; + struct tagged_addr *gpu_pages; + void *addr; + size_t size; + bool sync_needed; +}; + + +/** + * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the + * requested access permissions are supported + * @kctx: Context the VA range belongs to + * @gpu_addr: Start address of VA range + * @size: Size of VA range + * @prot_request: Flags indicating how the caller will then access the memory + * @map: Structure to be given to kbase_vunmap() on freeing + * + * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error + * + * Map a GPU VA Range into the kernel. The VA range must be contained within a + * GPU memory region. Appropriate CPU cache-flushing operations are made as + * required, dependent on the CPU mapping for the memory region. + * + * This is safer than using kmap() on the pages directly, + * because the pages here are refcounted to prevent freeing (and hence reuse + * elsewhere in the system) until an kbase_vunmap() + * + * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check + * whether the region should allow the intended access, and return an error if + * disallowed. This is essential for security of imported memory, particularly + * a user buf from SHM mapped into the process as RO. In that case, write + * access must be checked if the intention is for kernel to write to the + * memory. + * + * The checks are also there to help catch access errors on memory where + * security is not a concern: imported memory that is always RW, and memory + * that was allocated and owned by the process attached to @kctx. In this case, + * it helps to identify memory that was was mapped with the wrong access type. + * + * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases + * where either the security of memory is solely dependent on those flags, or + * when userspace code was expecting only the GPU to access the memory (e.g. HW + * workarounds). + * + * All cache maintenance operations shall be ignored if the + * memory region has been imported. + * + */ +void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size, + unsigned long prot_request, struct kbase_vmap_struct *map); + +/** + * kbase_vmap - Map a GPU VA range into the kernel safely + * @kctx: Context the VA range belongs to + * @gpu_addr: Start address of VA range + * @size: Size of VA range + * @map: Structure to be given to kbase_vunmap() on freeing + * + * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error + * + * Map a GPU VA Range into the kernel. The VA range must be contained within a + * GPU memory region. Appropriate CPU cache-flushing operations are made as + * required, dependent on the CPU mapping for the memory region. + * + * This is safer than using kmap() on the pages directly, + * because the pages here are refcounted to prevent freeing (and hence reuse + * elsewhere in the system) until an kbase_vunmap() + * + * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no + * checks to ensure the security of e.g. imported user bufs from RO SHM. + * + * Note: All cache maintenance operations shall be ignored if the memory region + * has been imported. 
+ */ +void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size, + struct kbase_vmap_struct *map); + +/** + * kbase_vunmap - Unmap a GPU VA range from the kernel + * @kctx: Context the VA range belongs to + * @map: Structure describing the mapping from the corresponding kbase_vmap() + * call + * + * Unmaps a GPU VA range from the kernel, given its @map structure obtained + * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as + * required, dependent on the CPU mapping for the memory region. + * + * The reference taken on pages during kbase_vmap() is released. + * + * Note: All cache maintenance operations shall be ignored if the memory region + * has been imported. + */ +void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map); + +/** @brief Allocate memory from kernel space and map it onto the GPU + * + * @param kctx The context used for the allocation/mapping + * @param size The size of the allocation in bytes + * @param handle An opaque structure used to contain the state needed to free the memory + * @return the VA for kernel space and GPU MMU + */ +void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle); + +/** @brief Free/unmap memory allocated by kbase_va_alloc + * + * @param kctx The context used for the allocation/mapping + * @param handle An opaque structure returned by the kbase_va_alloc function. + */ +void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle); + +extern const struct vm_operations_struct kbase_vm_ops; + +#endif /* _KBASE_MEM_LINUX_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h new file mode 100644 index 00000000000000..f4e88491327e71 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h @@ -0,0 +1,89 @@ +/* + * + * (C) COPYRIGHT 2012-2014,2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+
+
+
+
+#ifndef _KBASE_MEM_LOWLEVEL_H
+#define _KBASE_MEM_LOWLEVEL_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/dma-mapping.h>
+
+/**
+ * @brief Flags for kbase_phy_allocator_pages_alloc
+ */
+#define KBASE_PHY_PAGES_FLAG_DEFAULT (0)	/** Default allocation flag */
+#define KBASE_PHY_PAGES_FLAG_CLEAR   (1 << 0)	/** Clear the pages after allocation */
+#define KBASE_PHY_PAGES_FLAG_POISON  (1 << 1)	/** Fill the memory with a poison value */
+
+#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)
+
+#define KBASE_PHY_PAGES_POISON_VALUE  0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
+
+enum kbase_sync_type {
+	KBASE_SYNC_TO_CPU,
+	KBASE_SYNC_TO_DEVICE
+};
+
+struct tagged_addr { phys_addr_t tagged_addr; };
+
+#define HUGE_PAGE    (1u << 0)
+#define HUGE_HEAD    (1u << 1)
+#define FROM_PARTIAL (1u << 2)
+
+static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
+{
+	return t.tagged_addr & PAGE_MASK;
+}
+
+static inline struct tagged_addr as_tagged(phys_addr_t phys)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = phys & PAGE_MASK;
+	return t;
+}
+
+static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = (phys & PAGE_MASK) | (tag & ~PAGE_MASK);
+	return t;
+}
+
+static inline bool is_huge(struct tagged_addr t)
+{
+	return t.tagged_addr & HUGE_PAGE;
+}
+
+static inline bool is_huge_head(struct tagged_addr t)
+{
+	int mask = HUGE_HEAD | HUGE_PAGE;
+
+	return mask == (t.tagged_addr & mask);
+}
+
+static inline bool is_partial(struct tagged_addr t)
+{
+	return t.tagged_addr & FROM_PARTIAL;
+}
+
+#endif /* _KBASE_LOWLEVEL_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
new file mode 100644
index 00000000000000..c3a9c0096ae77f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
@@ -0,0 +1,667 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/atomic.h>
+#include <linux/version.h>
+
+#define pool_dbg(pool, format, ...) \
+	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
+		(pool->next_pool) ?
"kctx" : "kbdev", \ + kbase_mem_pool_size(pool), \ + kbase_mem_pool_max_size(pool), \ + ##__VA_ARGS__) + +#define NOT_DIRTY false +#define NOT_RECLAIMED false + +static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool) +{ + spin_lock(&pool->pool_lock); +} + +static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool) +{ + spin_unlock(&pool->pool_lock); +} + +static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool) +{ + ssize_t max_size = kbase_mem_pool_max_size(pool); + ssize_t cur_size = kbase_mem_pool_size(pool); + + return max(max_size - cur_size, (ssize_t)0); +} + +static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool) +{ + return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool); +} + +static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) +{ + return kbase_mem_pool_size(pool) == 0; +} + +static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, + struct page *p) +{ + lockdep_assert_held(&pool->pool_lock); + + list_add(&p->lru, &pool->page_list); + pool->cur_size++; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE_B); +#else + zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE); +#endif + + pool_dbg(pool, "added page\n"); +} + +static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) +{ + kbase_mem_pool_lock(pool); + kbase_mem_pool_add_locked(pool, p); + kbase_mem_pool_unlock(pool); +} + +static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, + struct list_head *page_list, size_t nr_pages) +{ + struct page *p; + + lockdep_assert_held(&pool->pool_lock); + + list_for_each_entry(p, page_list, lru) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE_B); +#else + zone_page_state_add(1, page_zone(p), NR_SLAB_RECLAIMABLE); +#endif + } + + list_splice(page_list, &pool->page_list); + pool->cur_size += nr_pages; + + pool_dbg(pool, "added %zu pages\n", nr_pages); +} + +static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool, + struct list_head *page_list, size_t nr_pages) +{ + kbase_mem_pool_lock(pool); + kbase_mem_pool_add_list_locked(pool, page_list, nr_pages); + kbase_mem_pool_unlock(pool); +} + +static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) +{ + struct page *p; + + lockdep_assert_held(&pool->pool_lock); + + if (kbase_mem_pool_is_empty(pool)) + return NULL; + + p = list_first_entry(&pool->page_list, struct page, lru); + list_del_init(&p->lru); + pool->cur_size--; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE_B); +#else + zone_page_state_add(-1, page_zone(p), NR_SLAB_RECLAIMABLE); +#endif + + pool_dbg(pool, "removed page\n"); + + return p; +} + +static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool) +{ + struct page *p; + + kbase_mem_pool_lock(pool); + p = kbase_mem_pool_remove_locked(pool); + kbase_mem_pool_unlock(pool); + + return p; +} + +static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, + struct page *p) +{ + struct device *dev = pool->kbdev->dev; + dma_sync_single_for_device(dev, kbase_dma_addr(p), + (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL); +} + +static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, + struct page *p) +{ + int i; + + for (i = 0; i < (1U << pool->order); i++) + clear_highpage(p+i); + + kbase_mem_pool_sync_page(pool, p); +} + +static void kbase_mem_pool_spill(struct kbase_mem_pool 
*next_pool, + struct page *p) +{ + /* Zero page before spilling */ + kbase_mem_pool_zero_page(next_pool, p); + + kbase_mem_pool_add(next_pool, p); +} + +struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool) +{ + struct page *p; + gfp_t gfp; + struct device *dev = pool->kbdev->dev; + dma_addr_t dma_addr; + int i; + +#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) + /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */ + gfp = GFP_USER | __GFP_ZERO; +#else + gfp = GFP_HIGHUSER | __GFP_ZERO; +#endif + + if (current->flags & PF_KTHREAD) { + /* Don't trigger OOM killer from kernel threads, e.g. when + * growing memory on GPU page fault */ + gfp |= __GFP_NORETRY; + } + + /* don't warn on higer order failures */ + if (pool->order) + gfp |= __GFP_NOWARN; + + p = alloc_pages(gfp, pool->order); + if (!p) + return NULL; + + dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_addr)) { + __free_pages(p, pool->order); + return NULL; + } + + WARN_ON(dma_addr != page_to_phys(p)); + for (i = 0; i < (1u << pool->order); i++) + kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i); + + return p; +} + +static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, + struct page *p) +{ + struct device *dev = pool->kbdev->dev; + dma_addr_t dma_addr = kbase_dma_addr(p); + int i; + + dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order), + DMA_BIDIRECTIONAL); + for (i = 0; i < (1u << pool->order); i++) + kbase_clear_dma_addr(p+i); + __free_pages(p, pool->order); + + pool_dbg(pool, "freed page to kernel\n"); +} + +static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, + size_t nr_to_shrink) +{ + struct page *p; + size_t i; + + lockdep_assert_held(&pool->pool_lock); + + for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) { + p = kbase_mem_pool_remove_locked(pool); + kbase_mem_pool_free_page(pool, p); + } + + return i; +} + +static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool, + size_t nr_to_shrink) +{ + size_t nr_freed; + + kbase_mem_pool_lock(pool); + nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink); + kbase_mem_pool_unlock(pool); + + return nr_freed; +} + +int kbase_mem_pool_grow(struct kbase_mem_pool *pool, + size_t nr_to_grow) +{ + struct page *p; + size_t i; + + for (i = 0; i < nr_to_grow; i++) { + p = kbase_mem_alloc_page(pool); + if (!p) + return -ENOMEM; + kbase_mem_pool_add(pool, p); + } + + return 0; +} + +void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) +{ + size_t cur_size; + int err = 0; + + cur_size = kbase_mem_pool_size(pool); + + if (new_size > pool->max_size) + new_size = pool->max_size; + + if (new_size < cur_size) + kbase_mem_pool_shrink(pool, cur_size - new_size); + else if (new_size > cur_size) + err = kbase_mem_pool_grow(pool, new_size - cur_size); + + if (err) { + size_t grown_size = kbase_mem_pool_size(pool); + + dev_warn(pool->kbdev->dev, + "Mem pool not grown to the required size of %zu bytes, grown for additional %zu bytes instead!\n", + (new_size - cur_size), (grown_size - cur_size)); + } +} + +void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) +{ + size_t cur_size; + size_t nr_to_shrink; + + kbase_mem_pool_lock(pool); + + pool->max_size = max_size; + + cur_size = kbase_mem_pool_size(pool); + if (max_size < cur_size) { + nr_to_shrink = cur_size - max_size; + kbase_mem_pool_shrink_locked(pool, nr_to_shrink); + } + + 
kbase_mem_pool_unlock(pool); +} + + +static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s, + struct shrink_control *sc) +{ + struct kbase_mem_pool *pool; + + pool = container_of(s, struct kbase_mem_pool, reclaim); + pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool)); + return kbase_mem_pool_size(pool); +} + +static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s, + struct shrink_control *sc) +{ + struct kbase_mem_pool *pool; + unsigned long freed; + + pool = container_of(s, struct kbase_mem_pool, reclaim); + + pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan); + + freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan); + + pool_dbg(pool, "reclaim freed %ld pages\n", freed); + + return freed; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +static int kbase_mem_pool_reclaim_shrink(struct shrinker *s, + struct shrink_control *sc) +{ + if (sc->nr_to_scan == 0) + return kbase_mem_pool_reclaim_count_objects(s, sc); + + return kbase_mem_pool_reclaim_scan_objects(s, sc); +} +#endif + +int kbase_mem_pool_init(struct kbase_mem_pool *pool, + size_t max_size, + size_t order, + struct kbase_device *kbdev, + struct kbase_mem_pool *next_pool) +{ + pool->cur_size = 0; + pool->max_size = max_size; + pool->order = order; + pool->kbdev = kbdev; + pool->next_pool = next_pool; + + spin_lock_init(&pool->pool_lock); + INIT_LIST_HEAD(&pool->page_list); + + /* Register shrinker */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) + pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink; +#else + pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects; + pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects; +#endif + pool->reclaim.seeks = DEFAULT_SEEKS; + /* Kernel versions prior to 3.1 : + * struct shrinker does not define batch */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) + pool->reclaim.batch = 0; +#endif + register_shrinker(&pool->reclaim); + + pool_dbg(pool, "initialized\n"); + + return 0; +} + +void kbase_mem_pool_term(struct kbase_mem_pool *pool) +{ + struct kbase_mem_pool *next_pool = pool->next_pool; + struct page *p; + size_t nr_to_spill = 0; + LIST_HEAD(spill_list); + int i; + + pool_dbg(pool, "terminate()\n"); + + unregister_shrinker(&pool->reclaim); + + kbase_mem_pool_lock(pool); + pool->max_size = 0; + + if (next_pool && !kbase_mem_pool_is_full(next_pool)) { + /* Spill to next pool (may overspill) */ + nr_to_spill = kbase_mem_pool_capacity(next_pool); + nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill); + + /* Zero pages first without holding the next_pool lock */ + for (i = 0; i < nr_to_spill; i++) { + p = kbase_mem_pool_remove_locked(pool); + kbase_mem_pool_zero_page(pool, p); + list_add(&p->lru, &spill_list); + } + } + + while (!kbase_mem_pool_is_empty(pool)) { + /* Free remaining pages to kernel */ + p = kbase_mem_pool_remove_locked(pool); + kbase_mem_pool_free_page(pool, p); + } + + kbase_mem_pool_unlock(pool); + + if (next_pool && nr_to_spill) { + /* Add new page list to next_pool */ + kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill); + + pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); + } + + pool_dbg(pool, "terminated\n"); +} + +struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool) +{ + struct page *p; + + do { + pool_dbg(pool, "alloc()\n"); + p = kbase_mem_pool_remove(pool); + + if (p) + return p; + + pool = pool->next_pool; + } while (pool); + + return NULL; +} + +void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p, + bool dirty) 
+{ + struct kbase_mem_pool *next_pool = pool->next_pool; + + pool_dbg(pool, "free()\n"); + + if (!kbase_mem_pool_is_full(pool)) { + /* Add to our own pool */ + if (dirty) + kbase_mem_pool_sync_page(pool, p); + + kbase_mem_pool_add(pool, p); + } else if (next_pool && !kbase_mem_pool_is_full(next_pool)) { + /* Spill to next pool */ + kbase_mem_pool_spill(next_pool, p); + } else { + /* Free page */ + kbase_mem_pool_free_page(pool, p); + } +} + +int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages, + struct tagged_addr *pages, bool partial_allowed) +{ + struct page *p; + size_t nr_from_pool; + size_t i = 0; + int err = -ENOMEM; + size_t nr_pages_internal; + + nr_pages_internal = nr_4k_pages / (1u << (pool->order)); + + if (nr_pages_internal * (1u << pool->order) != nr_4k_pages) + return -EINVAL; + + pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages); + pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal); + + /* Get pages from this pool */ + kbase_mem_pool_lock(pool); + nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool)); + while (nr_from_pool--) { + int j; + p = kbase_mem_pool_remove_locked(pool); + if (pool->order) { + pages[i++] = as_tagged_tag(page_to_phys(p), + HUGE_HEAD | HUGE_PAGE); + for (j = 1; j < (1u << pool->order); j++) + pages[i++] = as_tagged_tag(page_to_phys(p) + + PAGE_SIZE * j, + HUGE_PAGE); + } else { + pages[i++] = as_tagged(page_to_phys(p)); + } + } + kbase_mem_pool_unlock(pool); + + if (i != nr_4k_pages && pool->next_pool) { + /* Allocate via next pool */ + err = kbase_mem_pool_alloc_pages(pool->next_pool, + nr_4k_pages - i, pages + i, partial_allowed); + + if (err < 0) + goto err_rollback; + + i += err; + } else { + /* Get any remaining pages from kernel */ + while (i != nr_4k_pages) { + p = kbase_mem_alloc_page(pool); + if (!p) { + if (partial_allowed) + goto done; + else + goto err_rollback; + } + + if (pool->order) { + int j; + + pages[i++] = as_tagged_tag(page_to_phys(p), + HUGE_PAGE | + HUGE_HEAD); + for (j = 1; j < (1u << pool->order); j++) { + phys_addr_t phys; + + phys = page_to_phys(p) + PAGE_SIZE * j; + pages[i++] = as_tagged_tag(phys, + HUGE_PAGE); + } + } else { + pages[i++] = as_tagged(page_to_phys(p)); + } + } + } + +done: + pool_dbg(pool, "alloc_pages(%zu) done\n", i); + + return i; + +err_rollback: + kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED); + return err; +} + +static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool, + size_t nr_pages, struct tagged_addr *pages, + bool zero, bool sync) +{ + struct page *p; + size_t nr_to_pool = 0; + LIST_HEAD(new_page_list); + size_t i; + + if (!nr_pages) + return; + + pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n", + nr_pages, zero, sync); + + /* Zero/sync pages first without holding the pool lock */ + for (i = 0; i < nr_pages; i++) { + if (unlikely(!as_phys_addr_t(pages[i]))) + continue; + + if (is_huge_head(pages[i]) || !is_huge(pages[i])) { + p = phys_to_page(as_phys_addr_t(pages[i])); + if (zero) + kbase_mem_pool_zero_page(pool, p); + else if (sync) + kbase_mem_pool_sync_page(pool, p); + + list_add(&p->lru, &new_page_list); + nr_to_pool++; + } + pages[i] = as_tagged(0); + } + + /* Add new page list to pool */ + kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool); + + pool_dbg(pool, "add_array(%zu) added %zu pages\n", + nr_pages, nr_to_pool); +} + +void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, + struct tagged_addr *pages, bool dirty, bool reclaimed) +{ + struct kbase_mem_pool *next_pool 
= pool->next_pool; + struct page *p; + size_t nr_to_pool; + LIST_HEAD(to_pool_list); + size_t i = 0; + + pool_dbg(pool, "free_pages(%zu):\n", nr_pages); + + if (!reclaimed) { + /* Add to this pool */ + nr_to_pool = kbase_mem_pool_capacity(pool); + nr_to_pool = min(nr_pages, nr_to_pool); + + kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty); + + i += nr_to_pool; + + if (i != nr_pages && next_pool) { + /* Spill to next pool (may overspill) */ + nr_to_pool = kbase_mem_pool_capacity(next_pool); + nr_to_pool = min(nr_pages - i, nr_to_pool); + + kbase_mem_pool_add_array(next_pool, nr_to_pool, + pages + i, true, dirty); + i += nr_to_pool; + } + } + + /* Free any remaining pages to kernel */ + for (; i < nr_pages; i++) { + if (unlikely(!as_phys_addr_t(pages[i]))) + continue; + + if (is_huge(pages[i]) && !is_huge_head(pages[i])) { + pages[i] = as_tagged(0); + continue; + } + + p = phys_to_page(as_phys_addr_t(pages[i])); + + if (reclaimed) + zone_page_state_add(-1, page_zone(p), +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) + NR_SLAB_RECLAIMABLE_B); +#else + NR_SLAB_RECLAIMABLE); +#endif + + kbase_mem_pool_free_page(pool, p); + pages[i] = as_tagged(0); + } + + pool_dbg(pool, "free_pages(%zu) done\n", nr_pages); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c new file mode 100644 index 00000000000000..319cf2568aba45 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c @@ -0,0 +1,88 @@ +/* + * + * (C) COPYRIGHT 2014-2015, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include +#include + +#include + +#ifdef CONFIG_DEBUG_FS + +static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val) +{ + struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; + + *val = kbase_mem_pool_size(pool); + + return 0; +} + +static int kbase_mem_pool_debugfs_size_set(void *data, u64 val) +{ + struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; + + kbase_mem_pool_trim(pool, val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops, + kbase_mem_pool_debugfs_size_get, + kbase_mem_pool_debugfs_size_set, + "%llu\n"); + +static int kbase_mem_pool_debugfs_max_size_get(void *data, u64 *val) +{ + struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; + + *val = kbase_mem_pool_max_size(pool); + + return 0; +} + +static int kbase_mem_pool_debugfs_max_size_set(void *data, u64 val) +{ + struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; + + kbase_mem_pool_set_max_size(pool, val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_max_size_fops, + kbase_mem_pool_debugfs_max_size_get, + kbase_mem_pool_debugfs_max_size_set, + "%llu\n"); + +void kbase_mem_pool_debugfs_init(struct dentry *parent, + struct kbase_mem_pool *pool, + struct kbase_mem_pool *lp_pool) +{ + debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent, + pool, &kbase_mem_pool_debugfs_size_fops); + + debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent, + pool, &kbase_mem_pool_debugfs_max_size_fops); + + debugfs_create_file("lp_mem_pool_size", S_IRUGO | S_IWUSR, parent, + lp_pool, &kbase_mem_pool_debugfs_size_fops); + + debugfs_create_file("lp_mem_pool_max_size", S_IRUGO | S_IWUSR, parent, + lp_pool, &kbase_mem_pool_debugfs_max_size_fops); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h new file mode 100644 index 00000000000000..496eaf3f1e1aeb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h @@ -0,0 +1,40 @@ +/* + * + * (C) COPYRIGHT 2014-2015, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#ifndef _KBASE_MEM_POOL_DEBUGFS_H +#define _KBASE_MEM_POOL_DEBUGFS_H + +#include + +/** + * kbase_mem_pool_debugfs_init - add debugfs knobs for @pool + * @parent: Parent debugfs dentry + * @pool: Memory pool of small pages to control + * @lp_pool: Memory pool of large pages to control + * + * Adds four debugfs files under @parent: + * - mem_pool_size: get/set the current size of @pool + * - mem_pool_max_size: get/set the max size of @pool + * - lp_mem_pool_size: get/set the current size of @lp_pool + * - lp_mem_pool_max_size: get/set the max size of @lp_pool + */ +void kbase_mem_pool_debugfs_init(struct dentry *parent, + struct kbase_mem_pool *pool, + struct kbase_mem_pool *lp_pool); + +#endif /*_KBASE_MEM_POOL_DEBUGFS_H*/ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c new file mode 100644 index 00000000000000..d58fd8d62fdec9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c @@ -0,0 +1,121 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include + +#ifdef CONFIG_DEBUG_FS + +/** Show callback for the @c mem_profile debugfs file. + * + * This function is called to get the contents of the @c mem_profile debugfs + * file. This is a report of current memory usage and distribution in userspace. 
+ * + * @param sfile The debugfs entry + * @param data Data associated with the entry + * + * @return 0 if it successfully prints data in debugfs entry file, non-zero otherwise + */ +static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data) +{ + struct kbase_context *kctx = sfile->private; + + mutex_lock(&kctx->mem_profile_lock); + + seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size); + + seq_putc(sfile, '\n'); + + mutex_unlock(&kctx->mem_profile_lock); + + return 0; +} + +/* + * File operations related to debugfs entry for mem_profile + */ +static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file) +{ + return single_open(file, kbasep_mem_profile_seq_show, in->i_private); +} + +static const struct file_operations kbasep_mem_profile_debugfs_fops = { + .open = kbasep_mem_profile_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, + size_t size) +{ + int err = 0; + + mutex_lock(&kctx->mem_profile_lock); + + dev_dbg(kctx->kbdev->dev, "initialised: %d", + kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)); + + if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) { + if (!debugfs_create_file("mem_profile", S_IRUGO, + kctx->kctx_dentry, kctx, + &kbasep_mem_profile_debugfs_fops)) { + err = -EAGAIN; + } else { + kbase_ctx_flag_set(kctx, + KCTX_MEM_PROFILE_INITIALIZED); + } + } + + if (kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) { + kfree(kctx->mem_profile_data); + kctx->mem_profile_data = data; + kctx->mem_profile_size = size; + } else { + kfree(data); + } + + dev_dbg(kctx->kbdev->dev, "returning: %d, initialised: %d", + err, kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)); + + mutex_unlock(&kctx->mem_profile_lock); + + return err; +} + +void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx) +{ + mutex_lock(&kctx->mem_profile_lock); + + dev_dbg(kctx->kbdev->dev, "initialised: %d", + kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)); + + kfree(kctx->mem_profile_data); + kctx->mem_profile_data = NULL; + kctx->mem_profile_size = 0; + + mutex_unlock(&kctx->mem_profile_lock); +} + +#else /* CONFIG_DEBUG_FS */ + +int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, + size_t size) +{ + kfree(data); + return 0; +} +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h new file mode 100644 index 00000000000000..a1dc2e0b165b9b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h @@ -0,0 +1,59 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/** + * @file mali_kbase_mem_profile_debugfs.h + * Header file for mem profiles entries in debugfs + * + */ + +#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H +#define _KBASE_MEM_PROFILE_DEBUGFS_H + +#include +#include + +/** + * @brief Remove entry from Mali memory profile debugfs + */ +void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx); + +/** + * @brief Insert @p data to the debugfs file so it can be read by userspace + * + * The function takes ownership of @p data and frees it later when new data + * is inserted. + * + * If the debugfs entry corresponding to the @p kctx doesn't exist, + * an attempt will be made to create it. + * + * @param kctx The context whose debugfs file @p data should be inserted to + * @param data A NULL-terminated string to be inserted to the debugfs file, + * without the trailing new line character + * @param size The length of the @p data string + * @return 0 if @p data inserted correctly + * -EAGAIN in case of error + * @post @ref mem_profile_initialized will be set to @c true + * the first time this function succeeds. + */ +int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data, + size_t size); + +#endif /*_KBASE_MEM_PROFILE_DEBUGFS_H*/ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h new file mode 100644 index 00000000000000..82f0702974c202 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h @@ -0,0 +1,33 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @file mali_kbase_mem_profile_debugfs_buf_size.h + * Header file for the size of the buffer to accumulate the histogram report text in + */ + +#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_ +#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_ + +/** + * The size of the buffer to accumulate the histogram report text in + * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT + */ +#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t) (64 + ((80 + (56 * 64)) * 15) + 56)) + +#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c new file mode 100644 index 00000000000000..16947795a481be --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c @@ -0,0 +1,2149 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_mmu.c + * Base kernel MMU management. 
+ */ + +/* #define DEBUG 1 */ +#include +#include +#include +#include +#if defined(CONFIG_MALI_GATOR_SUPPORT) +#include +#endif +#include +#include +#include + +#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a) + +#include +#include +#include +#include +#include +#include + +#define KBASE_MMU_PAGE_ENTRIES 512 + +/** + * kbase_mmu_flush_invalidate() - Flush and invalidate the GPU caches. + * @kctx: The KBase context. + * @vpfn: The virtual page frame number to start the flush on. + * @nr: The number of pages to flush. + * @sync: Set if the operation should be synchronous or not. + * + * Issue a cache flush + invalidate to the GPU caches and invalidate the TLBs. + * + * If sync is not set then transactions still in flight when the flush is issued + * may use the old page tables and the data they write will not be written out + * to memory; this function returns after the flush has been issued but + * before all accesses which might affect the flushed region have completed. + * + * If sync is set then accesses in the flushed region will be drained + * before data is flushed and invalidated through L1, L2 and into memory, + * after which point this function will return. + */ +static void kbase_mmu_flush_invalidate(struct kbase_context *kctx, + u64 vpfn, size_t nr, bool sync); + +/** + * kbase_mmu_sync_pgd - sync page directory to memory + * @kbdev: Device pointer. + * @handle: Address of DMA region. + * @size: Size of the region to sync. + * + * This should be called after each page directory update. + */ + +static void kbase_mmu_sync_pgd(struct kbase_device *kbdev, + dma_addr_t handle, size_t size) +{ + /* If page table is not coherent then ensure the gpu can read + * the pages from memory + */ + if (kbdev->system_coherency != COHERENCY_ACE) + dma_sync_single_for_device(kbdev->dev, handle, size, + DMA_TO_DEVICE); +} + +/* + * Definitions: + * - PGD: Page Directory. + * - PTE: Page Table Entry. A 64bit value pointing to the next + * level of translation + * - ATE: Address Translation Entry. A 64bit value pointing to + * a 4kB physical page. + */ + +static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, + struct kbase_as *as, const char *reason_str); + + +/** + * make_multiple() - Calculate the nearest integral multiple of a given number. + * + * @minimum: The number to round up. + * @multiple: The number of which the function shall calculate an integral multiple. + * @result: The number rounded up to the nearest integral multiple in case of success, + * or just the number itself in case of failure. + * + * Return: 0 in case of success, or -1 in case of failure. + */ +static int make_multiple(size_t minimum, size_t multiple, size_t *result) +{ + int err = -1; + + *result = minimum; + + if (multiple > 0) { + size_t remainder = minimum % multiple; + + if (remainder != 0) + *result = minimum + multiple - remainder; + err = 0; + } + + return err; +} + +void page_fault_worker(struct work_struct *data) +{ + u64 fault_pfn; + u32 fault_status; + size_t new_pages; + size_t fault_rel_pfn; + struct kbase_as *faulting_as; + int as_no; + struct kbase_context *kctx; + struct kbase_device *kbdev; + struct kbase_va_region *region; + int err; + bool grown = false; + + faulting_as = container_of(data, struct kbase_as, work_pagefault); + fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT; + as_no = faulting_as->number; + + kbdev = container_of(faulting_as, struct kbase_device, as[as_no]); + + /* Grab the context that was already refcounted in kbase_mmu_interrupt().
* Therefore, it cannot be scheduled out of this AS until we explicitly release it + */ + kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); + if (WARN_ON(!kctx)) { + atomic_dec(&kbdev->faults_pending); + return; + } + + KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev); + + if (unlikely(faulting_as->protected_mode)) + { + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Protected mode fault"); + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + + goto fault_done; + } + + fault_status = faulting_as->fault_status; + switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) { + + case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT: + /* need to check against the region to handle this one */ + break; + + case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT: + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Permission failure"); + goto fault_done; + + case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT: + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Translation table bus fault"); + goto fault_done; + + case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG: + /* nothing to do, but we don't expect this fault currently */ + dev_warn(kbdev->dev, "Access flag unexpectedly set"); + goto fault_done; + + case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Address size fault"); + else + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Unknown fault code"); + goto fault_done; + + case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Memory attributes fault"); + else + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Unknown fault code"); + goto fault_done; + + default: + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Unknown fault code"); + goto fault_done; + } + + /* so we have a translation fault, let's see if it is for growable + * memory */ + kbase_gpu_vm_lock(kctx); + + region = kbase_region_tracker_find_region_enclosing_address(kctx, + faulting_as->fault_addr); + if (!region || region->flags & KBASE_REG_FREE) { + kbase_gpu_vm_unlock(kctx); + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Memory is not mapped on the GPU"); + goto fault_done; + } + + if (region->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) { + kbase_gpu_vm_unlock(kctx); + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "DMA-BUF is not mapped on the GPU"); + goto fault_done; + } + + if ((region->flags & GROWABLE_FLAGS_REQUIRED) + != GROWABLE_FLAGS_REQUIRED) { + kbase_gpu_vm_unlock(kctx); + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Memory is not growable"); + goto fault_done; + } + + if ((region->flags & KBASE_REG_DONT_NEED)) { + kbase_gpu_vm_unlock(kctx); + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Don't need memory can't be grown"); + goto fault_done; + } + + /* find the size we need to grow it by */ + /* we know the result fits in a size_t due to kbase_region_tracker_find_region_enclosing_address + * validating the fault_addr to be within a size_t from the start_pfn */ + fault_rel_pfn = fault_pfn - region->start_pfn; + + if (fault_rel_pfn < kbase_reg_current_backed_size(region)) { + dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring", + faulting_as->fault_addr, region->start_pfn, + region->start_pfn + +
kbase_reg_current_backed_size(region)); + + mutex_lock(&kbdev->mmu_hw_mutex); + + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + /* [1] in case another page fault occurred while we were + * handling the (duplicate) page fault we need to ensure we + * don't lose the other page fault as a result of us clearing + * the MMU IRQ. Therefore, after we clear the MMU IRQ we send + * an UNLOCK command that will retry any stalled memory + * transaction (which should cause the other page fault to be + * raised again). + */ + kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0, + AS_COMMAND_UNLOCK, 1); + + mutex_unlock(&kbdev->mmu_hw_mutex); + + kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + kbase_gpu_vm_unlock(kctx); + + goto fault_done; + } + + err = make_multiple(fault_rel_pfn - kbase_reg_current_backed_size(region) + 1, + region->extent, + &new_pages); + WARN_ON(err); + + /* cap to max vsize */ + new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region)); + + if (0 == new_pages) { + mutex_lock(&kbdev->mmu_hw_mutex); + + /* Duplicate of a fault we've already handled, nothing to do */ + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + /* See comment [1] about UNLOCK usage */ + kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0, + AS_COMMAND_UNLOCK, 1); + + mutex_unlock(&kbdev->mmu_hw_mutex); + + kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + kbase_gpu_vm_unlock(kctx); + goto fault_done; + } + + if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) { + if (region->gpu_alloc != region->cpu_alloc) { + if (kbase_alloc_phy_pages_helper( + region->cpu_alloc, new_pages) == 0) { + grown = true; + } else { + kbase_free_phy_pages_helper(region->gpu_alloc, + new_pages); + } + } else { + grown = true; + } + } + + + if (grown) { + u64 pfn_offset; + u32 op; + + /* alloc success */ + KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages); + + /* set up the new pages */ + pfn_offset = kbase_reg_current_backed_size(region) - new_pages; + /* + * Note: + * Issuing an MMU operation will unlock the MMU and cause the + * translation to be replayed. If the page insertion fails then + * rather than trying to continue, the context should be killed + * so the no_flush version of insert_pages is used which allows + * us to unlock the MMU as we see fit.
+ */ + err = kbase_mmu_insert_pages_no_flush(kctx, + region->start_pfn + pfn_offset, + &kbase_get_gpu_phy_pages(region)[pfn_offset], + new_pages, region->flags); + if (err) { + kbase_free_phy_pages_helper(region->gpu_alloc, new_pages); + if (region->gpu_alloc != region->cpu_alloc) + kbase_free_phy_pages_helper(region->cpu_alloc, + new_pages); + kbase_gpu_vm_unlock(kctx); + /* The locked VA region will be unlocked and the cache invalidated in here */ + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Page table update failure"); + goto fault_done; + } +#if defined(CONFIG_MALI_GATOR_SUPPORT) + kbase_trace_mali_page_fault_insert_pages(as_no, new_pages); +#endif + KBASE_TLSTREAM_AUX_PAGEFAULT(kctx->id, (u64)new_pages); + + /* AS transaction begin */ + mutex_lock(&kbdev->mmu_hw_mutex); + + /* flush L2 and unlock the VA (resumes the MMU) */ + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367)) + op = AS_COMMAND_FLUSH; + else + op = AS_COMMAND_FLUSH_PT; + + /* clear MMU interrupt - this needs to be done after updating + * the page tables but before issuing a FLUSH command. The + * FLUSH cmd has a side effect that it restarts stalled memory + * transactions in other address spaces which may cause + * another fault to occur. If we didn't clear the interrupt at + * this stage a new IRQ might not be raised when the GPU finds + * a MMU IRQ is already pending. + */ + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + + kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx, + faulting_as->fault_addr >> PAGE_SHIFT, + new_pages, + op, 1); + + mutex_unlock(&kbdev->mmu_hw_mutex); + /* AS transaction end */ + + /* reenable this in the mask */ + kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE); + kbase_gpu_vm_unlock(kctx); + } else { + /* failed to extend, handle as a normal PF */ + kbase_gpu_vm_unlock(kctx); + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Page allocation failure"); + } + +fault_done: + /* + * By this point, the fault was handled in some way, + * so release the ctx refcount + */ + kbasep_js_runpool_release_ctx(kbdev, kctx); + + atomic_dec(&kbdev->faults_pending); +} + +phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx) +{ + u64 *page; + int i; + struct page *p; + int new_page_count __maybe_unused; + + KBASE_DEBUG_ASSERT(NULL != kctx); + new_page_count = kbase_atomic_add_pages(1, &kctx->used_pages); + kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages); + + p = kbase_mem_pool_alloc(&kctx->mem_pool); + if (!p) + goto sub_pages; + + KBASE_TLSTREAM_AUX_PAGESALLOC( + kctx->id, + (u64)new_page_count); + + page = kmap(p); + if (NULL == page) + goto alloc_free; + + kbase_process_page_usage_inc(kctx, 1); + + for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) + kctx->kbdev->mmu_mode->entry_invalidate(&page[i]); + + kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE); + + kunmap(p); + return page_to_phys(p); + +alloc_free: + kbase_mem_pool_free(&kctx->mem_pool, p, false); +sub_pages: + kbase_atomic_sub_pages(1, &kctx->used_pages); + kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); + + return 0; +} + +KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd); + +/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the + * new table from the pool if needed and possible + */ +static int mmu_get_next_pgd(struct kbase_context *kctx, + phys_addr_t *pgd, u64 vpfn, int level) +{ + u64 *page; + phys_addr_t target_pgd; + struct page *p; + + KBASE_DEBUG_ASSERT(*pgd); + KBASE_DEBUG_ASSERT(NULL != kctx); + + 
lockdep_assert_held(&kctx->mmu_lock); + + /* + * Architecture spec defines level-0 as being the top-most. + * This is a bit unfortunate here, but we keep the same convention. + */ + vpfn >>= (3 - level) * 9; + vpfn &= 0x1FF; + + p = pfn_to_page(PFN_DOWN(*pgd)); + page = kmap(p); + if (NULL == page) { + dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n"); + return -EINVAL; + } + + target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]); + + if (!target_pgd) { + target_pgd = kbase_mmu_alloc_pgd(kctx); + if (!target_pgd) { + dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n"); + kunmap(p); + return -ENOMEM; + } + + kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd); + + kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE); + /* Rely on the caller to update the address space flags. */ + } + + kunmap(p); + *pgd = target_pgd; + + return 0; +} + +/* + * Returns the PGD for the specified level of translation + */ +static int mmu_get_pgd_at_level(struct kbase_context *kctx, + u64 vpfn, + unsigned int level, + phys_addr_t *out_pgd) +{ + phys_addr_t pgd; + int l; + + lockdep_assert_held(&kctx->mmu_lock); + pgd = kctx->pgd; + + for (l = MIDGARD_MMU_TOPLEVEL; l < level; l++) { + int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l); + /* Handle failure condition */ + if (err) { + dev_dbg(kctx->kbdev->dev, + "%s: mmu_get_next_pgd failure at level %d\n", + __func__, l); + return err; + } + } + + *out_pgd = pgd; + + return 0; +} + +#define mmu_get_bottom_pgd(kctx, vpfn, out_pgd) \ + mmu_get_pgd_at_level((kctx), (vpfn), MIDGARD_MMU_BOTTOMLEVEL, (out_pgd)) + + +static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx, + u64 from_vpfn, u64 to_vpfn) +{ + phys_addr_t pgd; + u64 vpfn = from_vpfn; + struct kbase_mmu_mode const *mmu_mode; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(0 != vpfn); + /* 64-bit address range is the max */ + KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); + KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn); + + lockdep_assert_held(&kctx->mmu_lock); + lockdep_assert_held(&kctx->reg_lock); + + mmu_mode = kctx->kbdev->mmu_mode; + + while (vpfn < to_vpfn) { + unsigned int i; + unsigned int idx = vpfn & 0x1FF; + unsigned int count = KBASE_MMU_PAGE_ENTRIES - idx; + unsigned int pcount = 0; + unsigned int left = to_vpfn - vpfn; + unsigned int level; + u64 *page; + + if (count > left) + count = left; + + /* need to check if this is a 2MB page or a 4kB */ + pgd = kctx->pgd; + + for (level = MIDGARD_MMU_TOPLEVEL; + level <= MIDGARD_MMU_BOTTOMLEVEL; level++) { + idx = (vpfn >> ((3 - level) * 9)) & 0x1FF; + page = kmap(phys_to_page(pgd)); + if (mmu_mode->ate_is_valid(page[idx], level)) + break; /* keep the mapping */ + kunmap(phys_to_page(pgd)); + pgd = mmu_mode->pte_to_phy_addr(page[idx]); + } + + switch (level) { + case MIDGARD_MMU_LEVEL(2): + /* remap to single entry to update */ + pcount = 1; + break; + case MIDGARD_MMU_BOTTOMLEVEL: + /* page count is the same as the logical count */ + pcount = count; + break; + default: + dev_warn(kctx->kbdev->dev, "%sNo support for ATEs at level %d\n", + __func__, level); + goto next; + } + + /* Invalidate the entries we added */ + for (i = 0; i < pcount; i++) + mmu_mode->entry_invalidate(&page[idx + i]); + + kbase_mmu_sync_pgd(kctx->kbdev, + kbase_dma_addr(phys_to_page(pgd)) + 8 * idx, + 8 * pcount); + kunmap(phys_to_page(pgd)); + +next: + vpfn += count; + } +} + +/* + * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn' + */ +int 
kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr phys, size_t nr, + unsigned long flags) +{ + phys_addr_t pgd; + u64 *pgd_page; + /* In case the insert_single_page only partially completes we need to be + * able to recover */ + bool recover_required = false; + u64 recover_vpfn = vpfn; + size_t recover_count = 0; + size_t remain = nr; + int err; + struct kbase_mmu_mode const *mmu_mode; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(0 != vpfn); + /* 64-bit address range is the max */ + KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); + + mmu_mode = kctx->kbdev->mmu_mode; + + /* Early out if there is nothing to do */ + if (nr == 0) + return 0; + + mutex_lock(&kctx->mmu_lock); + + while (remain) { + unsigned int i; + unsigned int index = vpfn & 0x1FF; + unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; + struct page *p; + + if (count > remain) + count = remain; + + /* + * Repeatedly calling mmu_get_bottom_pte() is clearly + * suboptimal. We don't have to re-parse the whole tree + * each time (just cache the l0-l2 sequence). + * On the other hand, it's only a gain when we map more than + * 256 pages at once (on average). Do we really care? + */ + do { + err = mmu_get_bottom_pgd(kctx, vpfn, &pgd); + if (err != -ENOMEM) + break; + /* Fill the memory pool with enough pages for + * the page walk to succeed + */ + mutex_unlock(&kctx->mmu_lock); + err = kbase_mem_pool_grow(&kctx->mem_pool, + MIDGARD_MMU_BOTTOMLEVEL); + mutex_lock(&kctx->mmu_lock); + } while (!err); + if (err) { + dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n"); + if (recover_required) { + /* Invalidate the pages we have partially + * completed */ + mmu_insert_pages_failure_recovery(kctx, + recover_vpfn, + recover_vpfn + + recover_count + ); + } + goto fail_unlock; + } + + p = pfn_to_page(PFN_DOWN(pgd)); + pgd_page = kmap(p); + if (!pgd_page) { + dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: kmap failure\n"); + if (recover_required) { + /* Invalidate the pages we have partially + * completed */ + mmu_insert_pages_failure_recovery(kctx, + recover_vpfn, + recover_vpfn + + recover_count + ); + } + err = -ENOMEM; + goto fail_unlock; + } + + for (i = 0; i < count; i++) { + unsigned int ofs = index + i; + + /* Fail if the current page is a valid ATE entry */ + KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL)); + + mmu_mode->entry_set_ate(&pgd_page[ofs], + phys, flags, + MIDGARD_MMU_BOTTOMLEVEL); + } + + vpfn += count; + remain -= count; + + kbase_mmu_sync_pgd(kctx->kbdev, + kbase_dma_addr(p) + (index * sizeof(u64)), + count * sizeof(u64)); + + kunmap(p); + /* We have started modifying the page table. 
+ * If further pages need inserting and fail we need to undo what + * has already taken place */ + recover_required = true; + recover_count += count; + } + mutex_unlock(&kctx->mmu_lock); + kbase_mmu_flush_invalidate(kctx, vpfn, nr, false); + return 0; + +fail_unlock: + mutex_unlock(&kctx->mmu_lock); + kbase_mmu_flush_invalidate(kctx, vpfn, nr, false); + return err; +} + +static inline void cleanup_empty_pte(struct kbase_context *kctx, u64 *pte) +{ + phys_addr_t tmp_pgd; + struct page *tmp_p; + + tmp_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(*pte); + tmp_p = phys_to_page(tmp_pgd); + kbase_mem_pool_free(&kctx->mem_pool, tmp_p, false); + kbase_process_page_usage_dec(kctx, 1); + kbase_atomic_sub_pages(1, &kctx->used_pages); + kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); +} + +int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, + const u64 start_vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags) +{ + phys_addr_t pgd; + u64 *pgd_page; + u64 insert_vpfn = start_vpfn; + size_t remain = nr; + int err; + struct kbase_mmu_mode const *mmu_mode; + + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(start_vpfn); + /* 64-bit address range is the max */ + KBASE_DEBUG_ASSERT(start_vpfn <= (U64_MAX / PAGE_SIZE)); + + mmu_mode = kctx->kbdev->mmu_mode; + + /* Early out if there is nothing to do */ + if (nr == 0) + return 0; + + mutex_lock(&kctx->mmu_lock); + + while (remain) { + unsigned int i; + unsigned int vindex = insert_vpfn & 0x1FF; + unsigned int count = KBASE_MMU_PAGE_ENTRIES - vindex; + struct page *p; + unsigned int cur_level; + + if (count > remain) + count = remain; + + if (!vindex && is_huge_head(*phys)) + cur_level = MIDGARD_MMU_LEVEL(2); + else + cur_level = MIDGARD_MMU_BOTTOMLEVEL; + + /* + * Repeatedly calling mmu_get_pgd_at_level() is clearly + * suboptimal. We don't have to re-parse the whole tree + * each time (just cache the l0-l2 sequence). + * On the other hand, it's only a gain when we map more than + * 256 pages at once (on average). Do we really care? 
+ */ + do { + err = mmu_get_pgd_at_level(kctx, insert_vpfn, cur_level, + &pgd); + if (err != -ENOMEM) + break; + /* Fill the memory pool with enough pages for + * the page walk to succeed + */ + mutex_unlock(&kctx->mmu_lock); + err = kbase_mem_pool_grow(&kctx->mem_pool, + cur_level); + mutex_lock(&kctx->mmu_lock); + } while (!err); + + if (err) { + dev_warn(kctx->kbdev->dev, + "%s: mmu_get_bottom_pgd failure\n", __func__); + if (insert_vpfn != start_vpfn) { + /* Invalidate the pages we have partially + * completed */ + mmu_insert_pages_failure_recovery(kctx, + start_vpfn, + insert_vpfn); + } + goto fail_unlock; + } + + p = pfn_to_page(PFN_DOWN(pgd)); + pgd_page = kmap(p); + if (!pgd_page) { + dev_warn(kctx->kbdev->dev, "%s: kmap failure\n", + __func__); + if (insert_vpfn != start_vpfn) { + /* Invalidate the pages we have partially + * completed */ + mmu_insert_pages_failure_recovery(kctx, + start_vpfn, + insert_vpfn); + } + err = -ENOMEM; + goto fail_unlock; + } + + if (cur_level == MIDGARD_MMU_LEVEL(2)) { + unsigned int level_index = (insert_vpfn >> 9) & 0x1FF; + u64 *target = &pgd_page[level_index]; + + if (mmu_mode->pte_is_valid(*target, cur_level)) + cleanup_empty_pte(kctx, target); + mmu_mode->entry_set_ate(target, *phys, flags, + cur_level); + } else { + for (i = 0; i < count; i++) { + unsigned int ofs = vindex + i; + u64 *target = &pgd_page[ofs]; + + /* Fail if the current page is a valid ATE entry + */ + KBASE_DEBUG_ASSERT(0 == (*target & 1UL)); + + kctx->kbdev->mmu_mode->entry_set_ate(target, + phys[i], flags, cur_level); + } + } + + phys += count; + insert_vpfn += count; + remain -= count; + + kbase_mmu_sync_pgd(kctx->kbdev, + kbase_dma_addr(p) + (vindex * sizeof(u64)), + count * sizeof(u64)); + + kunmap(p); + } + + mutex_unlock(&kctx->mmu_lock); + return 0; + +fail_unlock: + mutex_unlock(&kctx->mmu_lock); + return err; +} + +/* + * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn' + */ +int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags) +{ + int err; + + err = kbase_mmu_insert_pages_no_flush(kctx, vpfn, phys, nr, flags); + kbase_mmu_flush_invalidate(kctx, vpfn, nr, false); + return err; +} + +KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages); + +/** + * kbase_mmu_flush_invalidate_noretain() - Flush and invalidate the GPU caches + * without retaining the kbase context. + * @kctx: The KBase context. + * @vpfn: The virtual page frame number to start the flush on. + * @nr: The number of pages to flush. + * @sync: Set if the operation should be synchronous or not. + * + * As per kbase_mmu_flush_invalidate but doesn't retain the kctx or do any + * other locking. + */ +static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx, + u64 vpfn, size_t nr, bool sync) +{ + struct kbase_device *kbdev = kctx->kbdev; + int err; + u32 op; + + /* Early out if there is nothing to do */ + if (nr == 0) + return; + + if (sync) + op = AS_COMMAND_FLUSH_MEM; + else + op = AS_COMMAND_FLUSH_PT; + + err = kbase_mmu_hw_do_operation(kbdev, + &kbdev->as[kctx->as_nr], + kctx, vpfn, nr, op, 0); +#if KBASE_GPU_RESET_EN + if (err) { + /* Flush failed to complete, assume the + * GPU has hung and perform a reset to + * recover */ + dev_err(kbdev->dev, "Flush for GPU page table update did not complete. 
Issuing GPU soft-reset to recover\n"); + + if (kbase_prepare_to_reset_gpu_locked(kbdev)) + kbase_reset_gpu_locked(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + +#ifndef CONFIG_MALI_NO_MALI + /* + * As this function could be called in interrupt context the sync + * request can't block. Instead log the request and the next flush + * request will pick it up. + */ + if ((!err) && sync && + kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) + atomic_set(&kctx->drain_pending, 1); +#endif /* !CONFIG_MALI_NO_MALI */ +} + +static void kbase_mmu_flush_invalidate(struct kbase_context *kctx, + u64 vpfn, size_t nr, bool sync) +{ + struct kbase_device *kbdev; + bool ctx_is_in_runpool; +#ifndef CONFIG_MALI_NO_MALI + bool drain_pending = false; + + if (atomic_xchg(&kctx->drain_pending, 0)) + drain_pending = true; +#endif /* !CONFIG_MALI_NO_MALI */ + + /* Early out if there is nothing to do */ + if (nr == 0) + return; + + kbdev = kctx->kbdev; + mutex_lock(&kbdev->js_data.queue_mutex); + ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx); + mutex_unlock(&kbdev->js_data.queue_mutex); + + if (ctx_is_in_runpool) { + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + + if (!kbase_pm_context_active_handle_suspend(kbdev, + KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { + int err; + u32 op; + + /* AS transaction begin */ + mutex_lock(&kbdev->mmu_hw_mutex); + + if (sync) + op = AS_COMMAND_FLUSH_MEM; + else + op = AS_COMMAND_FLUSH_PT; + + err = kbase_mmu_hw_do_operation(kbdev, + &kbdev->as[kctx->as_nr], + kctx, vpfn, nr, op, 0); + +#if KBASE_GPU_RESET_EN + if (err) { + /* Flush failed to complete, assume the + * GPU has hung and perform a reset to + * recover */ + dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n"); + + if (kbase_prepare_to_reset_gpu(kbdev)) + kbase_reset_gpu(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + + mutex_unlock(&kbdev->mmu_hw_mutex); + /* AS transaction end */ + +#ifndef CONFIG_MALI_NO_MALI + /* + * The transaction lock must be dropped before here + * as kbase_wait_write_flush could take it if + * the GPU was powered down (static analysis doesn't + * know this can't happen). + */ + drain_pending |= (!err) && sync && + kbase_hw_has_issue(kctx->kbdev, + BASE_HW_ISSUE_6367); + if (drain_pending) { + /* Wait for GPU to flush write buffer */ + kbase_wait_write_flush(kctx); + } +#endif /* !CONFIG_MALI_NO_MALI */ + + kbase_pm_context_idle(kbdev); + } + kbasep_js_runpool_release_ctx(kbdev, kctx); + } +} + +void kbase_mmu_update(struct kbase_context *kctx) +{ + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex); + /* ASSERT that the context has a valid as_nr, which is only the case + * when it's scheduled in. + * + * as_nr won't change because the caller has the hwaccess_lock */ + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + + kctx->kbdev->mmu_mode->update(kctx); +} +KBASE_EXPORT_TEST_API(kbase_mmu_update); + +void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + lockdep_assert_held(&kbdev->mmu_hw_mutex); + + kbdev->mmu_mode->disable_as(kbdev, as_nr); +} + +void kbase_mmu_disable(struct kbase_context *kctx) +{ + /* ASSERT that the context has a valid as_nr, which is only the case + * when it's scheduled in.
+ * + * as_nr won't change because the caller has the hwaccess_lock */ + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + + lockdep_assert_held(&kctx->kbdev->hwaccess_lock); + + /* + * The address space is being disabled, drain all knowledge of it out + * from the caches as pages and page tables might be freed after this. + * + * The job scheduler code will already be holding the locks and context + * so just do the flush. + */ + kbase_mmu_flush_invalidate_noretain(kctx, 0, ~0, true); + + kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr); +} +KBASE_EXPORT_TEST_API(kbase_mmu_disable); + +/* + * We actually only discard the ATE, and not the page table + * pages. There is a potential DoS here, as we'll leak memory by + * having PTEs that are potentially unused. Will require physical + * page accounting, so MMU pages are part of the process allocation. + * + * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is + * currently scheduled into the runpool, and so potentially uses a lot of locks. + * These locks must be taken in the correct order with respect to others + * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more + * information. + */ +int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr) +{ + phys_addr_t pgd; + size_t requested_nr = nr; + struct kbase_mmu_mode const *mmu_mode; + int err = -EFAULT; + + KBASE_DEBUG_ASSERT(NULL != kctx); + beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr); + + if (0 == nr) { + /* early out if nothing to do */ + return 0; + } + + mutex_lock(&kctx->mmu_lock); + + mmu_mode = kctx->kbdev->mmu_mode; + + while (nr) { + unsigned int i; + unsigned int index = vpfn & 0x1FF; + unsigned int count = KBASE_MMU_PAGE_ENTRIES - index; + unsigned int pcount; + unsigned int level; + u64 *page; + + if (count > nr) + count = nr; + + /* need to check if this is a 2MB or a 4kB page */ + pgd = kctx->pgd; + + for (level = MIDGARD_MMU_TOPLEVEL; + level <= MIDGARD_MMU_BOTTOMLEVEL; level++) { + phys_addr_t next_pgd; + + index = (vpfn >> ((3 - level) * 9)) & 0x1FF; + page = kmap(phys_to_page(pgd)); + if (mmu_mode->ate_is_valid(page[index], level)) + break; /* keep the mapping */ + else if (!mmu_mode->pte_is_valid(page[index], level)) { + /* nothing here, advance */ + switch (level) { + case MIDGARD_MMU_LEVEL(0): + count = 134217728; + break; + case MIDGARD_MMU_LEVEL(1): + count = 262144; + break; + case MIDGARD_MMU_LEVEL(2): + count = 512; + break; + case MIDGARD_MMU_LEVEL(3): + count = 1; + break; + } + if (count > nr) + count = nr; + goto next; + } + next_pgd = mmu_mode->pte_to_phy_addr(page[index]); + kunmap(phys_to_page(pgd)); + pgd = next_pgd; + } + + switch (level) { + case MIDGARD_MMU_LEVEL(0): + case MIDGARD_MMU_LEVEL(1): + dev_warn(kctx->kbdev->dev, + "%s: No support for ATEs at level %d\n", + __func__, level); + kunmap(phys_to_page(pgd)); + goto out; + case MIDGARD_MMU_LEVEL(2): + /* can only teardown if count >= 512 */ + if (count >= 512) { + pcount = 1; + } else { + dev_warn(kctx->kbdev->dev, + "%s: limiting teardown as it tries to do a partial 2MB teardown, need 512, but have %d to tear down\n", + __func__, count); + pcount = 0; + } + break; + case MIDGARD_MMU_BOTTOMLEVEL: + /* page count is the same as the logical count */ + pcount = count; + break; + default: + dev_err(kctx->kbdev->dev, + "%s: found non-mapped memory, early out\n", + __func__); + vpfn += count; + nr -= count; + continue; + } + + /* Invalidate the entries we added */ + for (i = 0; i 
< pcount; i++) + mmu_mode->entry_invalidate(&page[index + i]); + + kbase_mmu_sync_pgd(kctx->kbdev, + kbase_dma_addr(phys_to_page(pgd)) + + 8 * index, 8*pcount); + +next: + kunmap(phys_to_page(pgd)); + vpfn += count; + nr -= count; + } + err = 0; +out: + mutex_unlock(&kctx->mmu_lock); + kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true); + return err; +} + +KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages); + +/** + * Update the entries for specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'. + * This call is being triggered as a response to the changes of the mem attributes + * + * @pre : The caller is responsible for validating the memory attributes + * + * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is + * currently scheduled into the runpool, and so potentially uses a lot of locks. + * These locks must be taken in the correct order with respect to others + * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more + * information. + */ +int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, + struct tagged_addr *phys, size_t nr, + unsigned long flags) +{ + phys_addr_t pgd; + u64 *pgd_page; + size_t requested_nr = nr; + struct kbase_mmu_mode const *mmu_mode; + int err; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(0 != vpfn); + KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE)); + + /* Early out if there is nothing to do */ + if (nr == 0) + return 0; + + mutex_lock(&kctx->mmu_lock); + + mmu_mode = kctx->kbdev->mmu_mode; + + dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages(): updating page share flags on GPU PFN 0x%llx from phys %p, %zu pages", + vpfn, phys, nr); + + while (nr) { + unsigned int i; + unsigned int index = vpfn & 0x1FF; + size_t count = KBASE_MMU_PAGE_ENTRIES - index; + struct page *p; + + if (count > nr) + count = nr; + + do { + err = mmu_get_bottom_pgd(kctx, vpfn, &pgd); + if (err != -ENOMEM) + break; + /* Fill the memory pool with enough pages for + * the page walk to succeed + */ + mutex_unlock(&kctx->mmu_lock); + err = kbase_mem_pool_grow(&kctx->mem_pool, + MIDGARD_MMU_BOTTOMLEVEL); + mutex_lock(&kctx->mmu_lock); + } while (!err); + if (err) { + dev_warn(kctx->kbdev->dev, + "mmu_get_bottom_pgd failure\n"); + goto fail_unlock; + } + + p = pfn_to_page(PFN_DOWN(pgd)); + pgd_page = kmap(p); + if (!pgd_page) { + dev_warn(kctx->kbdev->dev, "kmap failure\n"); + err = -ENOMEM; + goto fail_unlock; + } + + for (i = 0; i < count; i++) + mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i], + flags, MIDGARD_MMU_BOTTOMLEVEL); + + phys += count; + vpfn += count; + nr -= count; + + kbase_mmu_sync_pgd(kctx->kbdev, + kbase_dma_addr(p) + (index * sizeof(u64)), + count * sizeof(u64)); + + kunmap(pfn_to_page(PFN_DOWN(pgd))); + } + + mutex_unlock(&kctx->mmu_lock); + kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true); + return 0; + +fail_unlock: + mutex_unlock(&kctx->mmu_lock); + kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true); + return err; +} + +static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, + int level, u64 *pgd_page_buffer) +{ + phys_addr_t target_pgd; + struct page *p; + u64 *pgd_page; + int i; + struct kbase_mmu_mode const *mmu_mode; + + KBASE_DEBUG_ASSERT(NULL != kctx); + lockdep_assert_held(&kctx->mmu_lock); + lockdep_assert_held(&kctx->reg_lock); + + pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd))); + /* kmap_atomic should NEVER fail. 
*/ + KBASE_DEBUG_ASSERT(NULL != pgd_page); + /* Copy the page to our preallocated buffer so that we can minimize + * kmap_atomic usage */ + memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE); + kunmap_atomic(pgd_page); + pgd_page = pgd_page_buffer; + + mmu_mode = kctx->kbdev->mmu_mode; + + for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) { + target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]); + + if (target_pgd) { + if (mmu_mode->pte_is_valid(pgd_page[i], level)) { + mmu_teardown_level(kctx, + target_pgd, + level + 1, + pgd_page_buffer + + (PAGE_SIZE / sizeof(u64))); + } + } + } + + p = pfn_to_page(PFN_DOWN(pgd)); + kbase_mem_pool_free(&kctx->mem_pool, p, true); + kbase_process_page_usage_dec(kctx, 1); + kbase_atomic_sub_pages(1, &kctx->used_pages); + kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages); +} + +int kbase_mmu_init(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages); + + mutex_init(&kctx->mmu_lock); + + /* Preallocate MMU depth of four pages for mmu_teardown_level to use */ + kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); + + if (NULL == kctx->mmu_teardown_pages) + return -ENOMEM; + + return 0; +} + +void kbase_mmu_term(struct kbase_context *kctx) +{ + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages); + + kfree(kctx->mmu_teardown_pages); + kctx->mmu_teardown_pages = NULL; +} + +void kbase_mmu_free_pgd(struct kbase_context *kctx) +{ + int new_page_count = 0; + + KBASE_DEBUG_ASSERT(NULL != kctx); + KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages); + + mutex_lock(&kctx->mmu_lock); + mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL, + kctx->mmu_teardown_pages); + mutex_unlock(&kctx->mmu_lock); + + KBASE_TLSTREAM_AUX_PAGESALLOC( + kctx->id, + (u64)new_page_count); +} + +KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd); + +static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left) +{ + phys_addr_t target_pgd; + u64 *pgd_page; + int i; + size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64); + size_t dump_size; + struct kbase_mmu_mode const *mmu_mode; + + KBASE_DEBUG_ASSERT(NULL != kctx); + lockdep_assert_held(&kctx->mmu_lock); + + mmu_mode = kctx->kbdev->mmu_mode; + + pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd))); + if (!pgd_page) { + dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n"); + return 0; + } + + if (*size_left >= size) { + /* A modified physical address that contains the page table level */ + u64 m_pgd = pgd | level; + + /* Put the modified physical address in the output buffer */ + memcpy(*buffer, &m_pgd, sizeof(m_pgd)); + *buffer += sizeof(m_pgd); + + /* Followed by the page table itself */ + memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES); + *buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES; + + *size_left -= size; + } + + if (level < MIDGARD_MMU_BOTTOMLEVEL) { + for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) { + if (mmu_mode->pte_is_valid(pgd_page[i], level)) { + target_pgd = mmu_mode->pte_to_phy_addr( + pgd_page[i]); + + dump_size = kbasep_mmu_dump_level(kctx, + target_pgd, level + 1, + buffer, size_left); + if (!dump_size) { + kunmap(pfn_to_page(PFN_DOWN(pgd))); + return 0; + } + size += dump_size; + } + } + } + + kunmap(pfn_to_page(PFN_DOWN(pgd))); + + return size; +} + +void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages) +{ + void *kaddr; + size_t size_left; + + KBASE_DEBUG_ASSERT(kctx); + + if (0 == nr_pages) { + /* 
can't dump in a 0 sized buffer, early out */ + return NULL; + } + + size_left = nr_pages * PAGE_SIZE; + + KBASE_DEBUG_ASSERT(0 != size_left); + kaddr = vmalloc_user(size_left); + + mutex_lock(&kctx->mmu_lock); + + if (kaddr) { + u64 end_marker = 0xFFULL; + char *buffer; + char *mmu_dump_buffer; + u64 config[3]; + size_t dump_size, size = 0; + + buffer = (char *)kaddr; + mmu_dump_buffer = buffer; + + if (kctx->api_version >= KBASE_API_VERSION(8, 4)) { + struct kbase_mmu_setup as_setup; + + kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup); + config[0] = as_setup.transtab; + config[1] = as_setup.memattr; + config[2] = as_setup.transcfg; + memcpy(buffer, &config, sizeof(config)); + mmu_dump_buffer += sizeof(config); + size_left -= sizeof(config); + size += sizeof(config); + } + + dump_size = kbasep_mmu_dump_level(kctx, + kctx->pgd, + MIDGARD_MMU_TOPLEVEL, + &mmu_dump_buffer, + &size_left); + + if (!dump_size) + goto fail_free; + + size += dump_size; + + /* Add on the size for the end marker */ + size += sizeof(u64); + + if (size > (nr_pages * PAGE_SIZE)) { + /* The buffer isn't big enough - free the memory and return failure */ + goto fail_free; + } + + /* Add the end marker */ + memcpy(mmu_dump_buffer, &end_marker, sizeof(u64)); + } + + mutex_unlock(&kctx->mmu_lock); + return kaddr; + +fail_free: + vfree(kaddr); + mutex_unlock(&kctx->mmu_lock); + return NULL; +} +KBASE_EXPORT_TEST_API(kbase_mmu_dump); + +void bus_fault_worker(struct work_struct *data) +{ + struct kbase_as *faulting_as; + int as_no; + struct kbase_context *kctx; + struct kbase_device *kbdev; +#if KBASE_GPU_RESET_EN + bool reset_status = false; +#endif /* KBASE_GPU_RESET_EN */ + + faulting_as = container_of(data, struct kbase_as, work_busfault); + + as_no = faulting_as->number; + + kbdev = container_of(faulting_as, struct kbase_device, as[as_no]); + + /* Grab the context that was already refcounted in kbase_mmu_interrupt(). + * Therefore, it cannot be scheduled out of this AS until we explicitly release it + */ + kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no); + if (WARN_ON(!kctx)) { + atomic_dec(&kbdev->faults_pending); + return; + } + + if (unlikely(faulting_as->protected_mode)) + { + kbase_mmu_report_fault_and_kill(kctx, faulting_as, + "Permission failure"); + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); + kbasep_js_runpool_release_ctx(kbdev, kctx); + atomic_dec(&kbdev->faults_pending); + return; + + } + +#if KBASE_GPU_RESET_EN + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { + /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode. + * We start the reset before switching to UNMAPPED to ensure that unrelated jobs + * are evicted from the GPU before the switch. + */ + dev_err(kbdev->dev, "GPU bus error occurred. 
For this GPU version we now soft-reset as part of bus error recovery\n"); + reset_status = kbase_prepare_to_reset_gpu(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */ + if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) { + unsigned long flags; + + /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */ + /* AS transaction begin */ + mutex_lock(&kbdev->mmu_hw_mutex); + + /* Set the MMU into unmapped mode */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_mmu_disable(kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + mutex_unlock(&kbdev->mmu_hw_mutex); + /* AS transaction end */ + + kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); + kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); + + kbase_pm_context_idle(kbdev); + } + +#if KBASE_GPU_RESET_EN + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status) + kbase_reset_gpu(kbdev); +#endif /* KBASE_GPU_RESET_EN */ + + kbasep_js_runpool_release_ctx(kbdev, kctx); + + atomic_dec(&kbdev->faults_pending); +} + +const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code) +{ + const char *e; + + switch (exception_code) { + /* Non-Fault Status code */ + case 0x00: + e = "NOT_STARTED/IDLE/OK"; + break; + case 0x01: + e = "DONE"; + break; + case 0x02: + e = "INTERRUPTED"; + break; + case 0x03: + e = "STOPPED"; + break; + case 0x04: + e = "TERMINATED"; + break; + case 0x08: + e = "ACTIVE"; + break; + /* Job exceptions */ + case 0x40: + e = "JOB_CONFIG_FAULT"; + break; + case 0x41: + e = "JOB_POWER_FAULT"; + break; + case 0x42: + e = "JOB_READ_FAULT"; + break; + case 0x43: + e = "JOB_WRITE_FAULT"; + break; + case 0x44: + e = "JOB_AFFINITY_FAULT"; + break; + case 0x48: + e = "JOB_BUS_FAULT"; + break; + case 0x50: + e = "INSTR_INVALID_PC"; + break; + case 0x51: + e = "INSTR_INVALID_ENC"; + break; + case 0x52: + e = "INSTR_TYPE_MISMATCH"; + break; + case 0x53: + e = "INSTR_OPERAND_FAULT"; + break; + case 0x54: + e = "INSTR_TLS_FAULT"; + break; + case 0x55: + e = "INSTR_BARRIER_FAULT"; + break; + case 0x56: + e = "INSTR_ALIGN_FAULT"; + break; + case 0x58: + e = "DATA_INVALID_FAULT"; + break; + case 0x59: + e = "TILE_RANGE_FAULT"; + break; + case 0x5A: + e = "ADDR_RANGE_FAULT"; + break; + case 0x60: + e = "OUT_OF_MEMORY"; + break; + /* GPU exceptions */ + case 0x80: + e = "DELAYED_BUS_FAULT"; + break; + case 0x88: + e = "SHAREABILITY_FAULT"; + break; + /* MMU exceptions */ + case 0xC0: + case 0xC1: + case 0xC2: + case 0xC3: + case 0xC4: + case 0xC5: + case 0xC6: + case 0xC7: + e = "TRANSLATION_FAULT"; + break; + case 0xC8: + e = "PERMISSION_FAULT"; + break; + case 0xC9: + case 0xCA: + case 0xCB: + case 0xCC: + case 0xCD: + case 0xCE: + case 0xCF: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + e = "PERMISSION_FAULT"; + else + e = "UNKNOWN"; + break; + case 0xD0: + case 0xD1: + case 0xD2: + case 0xD3: + case 0xD4: + case 0xD5: + case 0xD6: + case 0xD7: + e = "TRANSTAB_BUS_FAULT"; + break; + case 0xD8: + e = "ACCESS_FLAG"; + break; + case 0xD9: + case 0xDA: + case 0xDB: + case 0xDC: + case 0xDD: + case 0xDE: + case 0xDF: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + e = "ACCESS_FLAG"; + else + e = "UNKNOWN"; + break; + case 0xE0: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + if 
(kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + e = "ADDRESS_SIZE_FAULT"; + else + e = "UNKNOWN"; + break; + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xED: + case 0xEE: + case 0xEF: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + e = "MEMORY_ATTRIBUTES_FAULT"; + else + e = "UNKNOWN"; + break; + default: + e = "UNKNOWN"; + break; + }; + + return e; +} + +static const char *access_type_name(struct kbase_device *kbdev, + u32 fault_status) +{ + switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) { + case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC: + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + return "ATOMIC"; + else + return "UNKNOWN"; + case AS_FAULTSTATUS_ACCESS_TYPE_READ: + return "READ"; + case AS_FAULTSTATUS_ACCESS_TYPE_WRITE: + return "WRITE"; + case AS_FAULTSTATUS_ACCESS_TYPE_EX: + return "EXECUTE"; + default: + WARN_ON(1); + return NULL; + } +} + +/** + * The caller must ensure it's retained the ctx to prevent it from being scheduled out whilst it's being worked on. + */ +static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, + struct kbase_as *as, const char *reason_str) +{ + unsigned long flags; + int exception_type; + int access_type; + int source_id; + int as_no; + struct kbase_device *kbdev; + struct kbasep_js_device_data *js_devdata; + +#if KBASE_GPU_RESET_EN + bool reset_status = false; +#endif + + as_no = as->number; + kbdev = kctx->kbdev; + js_devdata = &kbdev->js_data; + + /* ASSERT that the context won't leave the runpool */ + KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0); + + /* decode the fault status */ + exception_type = as->fault_status & 0xFF; + access_type = (as->fault_status >> 8) & 0x3; + source_id = (as->fault_status >> 16); + + /* terminal fault, print info about the fault */ + dev_err(kbdev->dev, + "Unhandled Page fault in AS%d at VA 0x%016llX\n" + "Reason: %s\n" + "raw fault status: 0x%X\n" + "decoded fault status: %s\n" + "exception type 0x%X: %s\n" + "access type 0x%X: %s\n" + "source id 0x%X\n" + "pid: %d\n", + as_no, as->fault_addr, + reason_str, + as->fault_status, + (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), + exception_type, kbase_exception_name(kbdev, exception_type), + access_type, access_type_name(kbdev, as->fault_status), + source_id, + kctx->pid); + + /* hardware counters dump fault handling */ + if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) && + (kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_DUMPING)) { + unsigned int num_core_groups = kbdev->gpu_props.num_core_groups; + + if ((as->fault_addr >= kbdev->hwcnt.addr) && + (as->fault_addr < (kbdev->hwcnt.addr + + (num_core_groups * 2048)))) + kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT; + } + + /* Stop the kctx from submitting more jobs and cause it to be scheduled + * out/rescheduled - this will occur on releasing the context's refcount */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbasep_js_clear_submit_allowed(js_devdata, kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + /* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this + * context can appear in the job slots from this point on */ + kbase_backend_jm_kill_jobs_from_kctx(kctx); + /* AS transaction begin */ + mutex_lock(&kbdev->mmu_hw_mutex); +#if KBASE_GPU_RESET_EN + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { + /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode. 
+ * We start the reset before switching to UNMAPPED to ensure that unrelated jobs + * are evicted from the GPU before the switch. + */ + dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery."); + reset_status = kbase_prepare_to_reset_gpu(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */ + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + kbase_mmu_disable(kctx); + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + mutex_unlock(&kbdev->mmu_hw_mutex); + /* AS transaction end */ + /* Clear down the fault */ + kbase_mmu_hw_clear_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); + kbase_mmu_hw_enable_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); + +#if KBASE_GPU_RESET_EN + if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status) + kbase_reset_gpu(kbdev); +#endif /* KBASE_GPU_RESET_EN */ +} + +void kbasep_as_do_poke(struct work_struct *work) +{ + struct kbase_as *as; + struct kbase_device *kbdev; + struct kbase_context *kctx; + unsigned long flags; + + KBASE_DEBUG_ASSERT(work); + as = container_of(work, struct kbase_as, poke_work); + kbdev = container_of(as, struct kbase_device, as[as->number]); + KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); + + /* GPU power will already be active by virtue of the caller holding a JS + * reference on the address space, and will not release it until this worker + * has finished */ + + /* Further to the comment above, we know that while this function is running + * the AS will not be released as before the atom is released this workqueue + * is flushed (in kbase_as_poking_timer_release_atom) + */ + kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number); + + /* AS transaction begin */ + mutex_lock(&kbdev->mmu_hw_mutex); + /* Force a uTLB invalidate */ + kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0, + AS_COMMAND_UNLOCK, 0); + mutex_unlock(&kbdev->mmu_hw_mutex); + /* AS transaction end */ + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + if (as->poke_refcount && + !(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) { + /* Only queue up the timer if we need it, and we're not trying to kill it */ + hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); +} + +enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer) +{ + struct kbase_as *as; + int queue_work_ret; + + KBASE_DEBUG_ASSERT(NULL != timer); + as = container_of(timer, struct kbase_as, poke_timer); + KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); + + queue_work_ret = queue_work(as->poke_wq, &as->poke_work); + KBASE_DEBUG_ASSERT(queue_work_ret); + return HRTIMER_NORESTART; +} + +/** + * Retain the poking timer on an atom's context (if the atom hasn't already + * done so), and start the timer (if it's not already started). + * + * This must only be called on a context that's scheduled in, and an atom + * that's running on the GPU. 
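+ *
+ * Each successful retain is expected to be balanced by a later call to
+ * kbase_as_poking_timer_release_atom() for the same atom; the atom's
+ * 'poking' flag ensures the retain/release pair only happens once per atom.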
+ * + * The caller must hold hwaccess_lock + * + * This can be called safely from atomic context + */ +void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + struct kbase_as *as; + + KBASE_DEBUG_ASSERT(kbdev); + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(katom); + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (katom->poking) + return; + + katom->poking = 1; + + /* It's safe to work on the as/as_nr without an explicit reference, + * because the caller holds the hwaccess_lock, and the atom itself + * was also running and had already taken a reference */ + as = &kbdev->as[kctx->as_nr]; + + if (++(as->poke_refcount) == 1) { + /* First refcount for poke needed: check if not already in flight */ + if (!as->poke_state) { + /* need to start poking */ + as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT; + queue_work(as->poke_wq, &as->poke_work); + } + } +} + +/** + * If an atom holds a poking timer, release it and wait for it to finish + * + * This must only be called on a context that's scheduled in, and an atom + * that still has a JS reference on the context + * + * This must \b not be called from atomic context, since it can sleep. + */ +void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom) +{ + struct kbase_as *as; + unsigned long flags; + + KBASE_DEBUG_ASSERT(kbdev); + KBASE_DEBUG_ASSERT(kctx); + KBASE_DEBUG_ASSERT(katom); + KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID); + + if (!katom->poking) + return; + + as = &kbdev->as[kctx->as_nr]; + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + KBASE_DEBUG_ASSERT(as->poke_refcount > 0); + KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT); + + if (--(as->poke_refcount) == 0) { + as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE; + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + hrtimer_cancel(&as->poke_timer); + flush_workqueue(as->poke_wq); + + spin_lock_irqsave(&kbdev->hwaccess_lock, flags); + + /* Re-check whether it's still needed */ + if (as->poke_refcount) { + int queue_work_ret; + /* Poking still needed: + * - Another retain will not be starting the timer or queueing work, + * because it's still marked as in-flight + * - The hrtimer has finished, and has not started a new timer or + * queued work because it's been marked as killing + * + * So whatever happens now, just queue the work again */ + as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE); + queue_work_ret = queue_work(as->poke_wq, &as->poke_work); + KBASE_DEBUG_ASSERT(queue_work_ret); + } else { + /* It isn't - so mark it as not in flight, and not killing */ + as->poke_state = 0u; + + /* The poke associated with the atom has now finished. If this is + * also the last atom on the context, then we can guarantee no more + * pokes (and thus no more poking register accesses) will occur on + * the context until new atoms are run */ + } + } + spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); + + katom->poking = 0; +} + +void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (!kctx) { + dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n", + kbase_as_has_bus_fault(as) ? 
"Bus error" : "Page fault", + as->number, as->fault_addr); + + /* Since no ctx was found, the MMU must be disabled. */ + WARN_ON(as->current_setup.transtab); + + if (kbase_as_has_bus_fault(as)) { + kbase_mmu_hw_clear_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); + kbase_mmu_hw_enable_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED); + } else if (kbase_as_has_page_fault(as)) { + kbase_mmu_hw_clear_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); + kbase_mmu_hw_enable_fault(kbdev, as, kctx, + KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED); + } + +#if KBASE_GPU_RESET_EN + if (kbase_as_has_bus_fault(as) && + kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) { + bool reset_status; + /* + * Reset the GPU, like in bus_fault_worker, in case an + * earlier error hasn't been properly cleared by this + * point. + */ + dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n"); + reset_status = kbase_prepare_to_reset_gpu_locked(kbdev); + if (reset_status) + kbase_reset_gpu_locked(kbdev); + } +#endif /* KBASE_GPU_RESET_EN */ + + return; + } + + if (kbase_as_has_bus_fault(as)) { + /* + * hw counters dumping in progress, signal the + * other thread that it failed + */ + if ((kbdev->hwcnt.kctx == kctx) && + (kbdev->hwcnt.backend.state == + KBASE_INSTR_STATE_DUMPING)) + kbdev->hwcnt.backend.state = + KBASE_INSTR_STATE_FAULT; + + /* + * Stop the kctx from submitting more jobs and cause it + * to be scheduled out/rescheduled when all references + * to it are released + */ + kbasep_js_clear_submit_allowed(js_devdata, kctx); + + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) + dev_warn(kbdev->dev, + "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n", + as->number, as->fault_addr, + as->fault_extra_addr); + else + dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n", + as->number, as->fault_addr); + + /* + * We need to switch to UNMAPPED mode - but we do this in a + * worker so that we can sleep + */ + WARN_ON(!queue_work(as->pf_wq, &as->work_busfault)); + atomic_inc(&kbdev->faults_pending); + } else { + WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault)); + atomic_inc(&kbdev->faults_pending); + } +} + +void kbase_flush_mmu_wqs(struct kbase_device *kbdev) +{ + int i; + + for (i = 0; i < kbdev->nr_hw_address_spaces; i++) { + struct kbase_as *as = &kbdev->as[i]; + + flush_workqueue(as->pf_wq); + } +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h new file mode 100644 index 00000000000000..986e959e9a0c77 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h @@ -0,0 +1,123 @@ +/* + * + * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/** + * @file + * Interface file for accessing MMU hardware functionality + */ + +/** + * @page mali_kbase_mmu_hw_page MMU hardware interface + * + * @section mali_kbase_mmu_hw_intro_sec Introduction + * This module provides an abstraction for accessing the functionality provided + * by the midgard MMU and thus allows all MMU HW access to be contained within + * one common place and allows for different backends (implementations) to + * be provided. + */ + +#ifndef _MALI_KBASE_MMU_HW_H_ +#define _MALI_KBASE_MMU_HW_H_ + +/* Forward declarations */ +struct kbase_device; +struct kbase_as; +struct kbase_context; + +/** + * @addtogroup base_kbase_api + * @{ + */ + +/** + * @addtogroup mali_kbase_mmu_hw MMU access APIs + * @{ + */ + +/** @brief MMU fault type descriptor. + */ +enum kbase_mmu_fault_type { + KBASE_MMU_FAULT_TYPE_UNKNOWN = 0, + KBASE_MMU_FAULT_TYPE_PAGE, + KBASE_MMU_FAULT_TYPE_BUS, + KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED, + KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED +}; + +/** @brief Configure an address space for use. + * + * Configure the MMU using the address space details setup in the + * @ref kbase_context structure. + * + * @param[in] kbdev kbase device to configure. + * @param[in] as address space to configure. + * @param[in] kctx kbase context to configure. + */ +void kbase_mmu_hw_configure(struct kbase_device *kbdev, + struct kbase_as *as, struct kbase_context *kctx); + +/** @brief Issue an operation to the MMU. + * + * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that + * is associated with the provided @ref kbase_context over the specified range + * + * @param[in] kbdev kbase device to issue the MMU operation on. + * @param[in] as address space to issue the MMU operation on. + * @param[in] kctx kbase context to issue the MMU operation on. + * @param[in] vpfn MMU Virtual Page Frame Number to start the + * operation on. + * @param[in] nr Number of pages to work on. + * @param[in] type Operation type (written to ASn_COMMAND). + * @param[in] handling_irq Is this operation being called during the handling + * of an interrupt? + * + * @return Zero if the operation was successful, non-zero otherwise. + */ +int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type, + unsigned int handling_irq); + +/** @brief Clear a fault that has been previously reported by the MMU. + * + * Clear a bus error or page fault that has been reported by the MMU. + * + * @param[in] kbdev kbase device to clear the fault from. + * @param[in] as address space to clear the fault from. + * @param[in] kctx kbase context to clear the fault from or NULL. + * @param[in] type The type of fault that needs to be cleared. + */ +void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, enum kbase_mmu_fault_type type); + +/** @brief Enable fault that has been previously reported by the MMU. + * + * After a page fault or bus error has been reported by the MMU these + * will be disabled. After these are handled this function needs to be + * called to enable the page fault or bus error fault again. + * + * @param[in] kbdev kbase device to again enable the fault from. + * @param[in] as address space to again enable the fault from. + * @param[in] kctx kbase context to again enable the fault from. + * @param[in] type The type of fault that needs to be enabled again. 
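+ *
+ * Illustrative call sequence (a sketch of how the bus/page fault workers
+ * elsewhere in this patch pair the clear/enable calls; not a complete
+ * handler):
+ *
+ *   kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ *           KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ *   kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ *           KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);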
+ */ +void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as, + struct kbase_context *kctx, enum kbase_mmu_fault_type type); + +/** @} *//* end group mali_kbase_mmu_hw */ +/** @} *//* end group base_kbase_api */ + +#endif /* _MALI_KBASE_MMU_HW_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c new file mode 100644 index 00000000000000..0fb717b67af92c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c @@ -0,0 +1,214 @@ +/* + * + * (C) COPYRIGHT 2010-2014, 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include "mali_kbase.h" +#include "mali_midg_regmap.h" +#include "mali_kbase_defs.h" + +#define ENTRY_TYPE_MASK 3ULL +/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0). + * Valid ATE entries at level 3 are flagged with the value 3. + * Valid ATE entries at level 0-2 are flagged with the value 1. + */ +#define ENTRY_IS_ATE_L3 3ULL +#define ENTRY_IS_ATE_L02 1ULL +#define ENTRY_IS_INVAL 2ULL +#define ENTRY_IS_PTE 3ULL + +#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */ +#define ENTRY_ACCESS_RW (1ULL << 6) /* bits 6:7 */ +#define ENTRY_ACCESS_RO (3ULL << 6) +#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */ +#define ENTRY_ACCESS_BIT (1ULL << 10) +#define ENTRY_NX_BIT (1ULL << 54) + +/* Helper Function to perform assignment of page table entries, to + * ensure the use of strd, which is required on LPAE systems. + */ +static inline void page_table_entry_set(u64 *pte, u64 phy) +{ +#ifdef CONFIG_64BIT + *pte = phy; +#elif defined(CONFIG_ARM) + /* + * In order to prevent the compiler keeping cached copies of + * memory, we have to explicitly say that we have updated memory. + * + * Note: We could manually move the data ourselves into R0 and + * R1 by specifying register variables that are explicitly + * given registers assignments, the down side of this is that + * we have to assume cpu endianness. To avoid this we can use + * the ldrd to read the data from memory into R0 and R1 which + * will respect the cpu endianness, we then use strd to make + * the 64 bit assignment to the page table entry. + */ + asm volatile("ldrd r0, r1, [%[ptemp]]\n\t" + "strd r0, r1, [%[pte]]\n\t" + : "=m" (*pte) + : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy) + : "r0", "r1"); +#else +#error "64-bit atomic write must be implemented for your architecture" +#endif +} + +static void mmu_get_as_setup(struct kbase_context *kctx, + struct kbase_mmu_setup * const setup) +{ + /* Set up the required caching policies at the correct indices + * in the memattr register. 
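+ *
+ * Each AS_MEMATTR_INDEX_* value selects one byte lane of the 64-bit
+ * MEMATTR value, which is why every attribute below is shifted left
+ * by (index * 8) bits.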
+ */ + setup->memattr = + (AS_MEMATTR_IMPL_DEF_CACHE_POLICY << + (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) | + (AS_MEMATTR_FORCE_TO_CACHE_ALL << + (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) | + (AS_MEMATTR_WRITE_ALLOC << + (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) | + (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF << + (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) | + (AS_MEMATTR_AARCH64_OUTER_WA << + (AS_MEMATTR_INDEX_OUTER_WA * 8)); + + setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK; + setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K; +} + +static void mmu_update(struct kbase_context *kctx) +{ + struct kbase_device * const kbdev = kctx->kbdev; + struct kbase_as * const as = &kbdev->as[kctx->as_nr]; + struct kbase_mmu_setup * const current_setup = &as->current_setup; + + mmu_get_as_setup(kctx, current_setup); + + /* Apply the address space setting */ + kbase_mmu_hw_configure(kbdev, as, kctx); +} + +static void mmu_disable_as(struct kbase_device *kbdev, int as_nr) +{ + struct kbase_as * const as = &kbdev->as[as_nr]; + struct kbase_mmu_setup * const current_setup = &as->current_setup; + + current_setup->transtab = 0ULL; + current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED; + + /* Apply the address space setting */ + kbase_mmu_hw_configure(kbdev, as, NULL); +} + +static phys_addr_t pte_to_phy_addr(u64 entry) +{ + if (!(entry & 1)) + return 0; + + return entry & ~0xFFF; +} + +static int ate_is_valid(u64 ate, unsigned int level) +{ + if (level == MIDGARD_MMU_BOTTOMLEVEL) + return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L3); + else + return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L02); +} + +static int pte_is_valid(u64 pte, unsigned int level) +{ + /* PTEs cannot exist at the bottom level */ + if (level == MIDGARD_MMU_BOTTOMLEVEL) + return false; + return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE); +} + +/* + * Map KBASE_REG flags to MMU flags + */ +static u64 get_mmu_flags(unsigned long flags) +{ + u64 mmu_flags; + + /* store mem_attr index as 4:2 (macro called ensures 3 bits already) */ + mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2; + + /* Set access flags - note that AArch64 stage 1 does not support + * write-only access, so we use read/write instead + */ + if (flags & KBASE_REG_GPU_WR) + mmu_flags |= ENTRY_ACCESS_RW; + else if (flags & KBASE_REG_GPU_RD) + mmu_flags |= ENTRY_ACCESS_RO; + + /* nx if requested */ + mmu_flags |= (flags & KBASE_REG_GPU_NX) ? 
ENTRY_NX_BIT : 0; + + if (flags & KBASE_REG_SHARE_BOTH) { + /* inner and outer shareable */ + mmu_flags |= SHARE_BOTH_BITS; + } else if (flags & KBASE_REG_SHARE_IN) { + /* inner shareable coherency */ + mmu_flags |= SHARE_INNER_BITS; + } + + return mmu_flags; +} + +static void entry_set_ate(u64 *entry, + struct tagged_addr phy, + unsigned long flags, + unsigned int level) +{ + if (level == MIDGARD_MMU_BOTTOMLEVEL) + page_table_entry_set(entry, as_phys_addr_t(phy) | + get_mmu_flags(flags) | + ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3); + else + page_table_entry_set(entry, as_phys_addr_t(phy) | + get_mmu_flags(flags) | + ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02); +} + +static void entry_set_pte(u64 *entry, phys_addr_t phy) +{ + page_table_entry_set(entry, (phy & PAGE_MASK) | + ENTRY_ACCESS_BIT | ENTRY_IS_PTE); +} + +static void entry_invalidate(u64 *entry) +{ + page_table_entry_set(entry, ENTRY_IS_INVAL); +} + +static struct kbase_mmu_mode const aarch64_mode = { + .update = mmu_update, + .get_as_setup = mmu_get_as_setup, + .disable_as = mmu_disable_as, + .pte_to_phy_addr = pte_to_phy_addr, + .ate_is_valid = ate_is_valid, + .pte_is_valid = pte_is_valid, + .entry_set_ate = entry_set_ate, + .entry_set_pte = entry_set_pte, + .entry_invalidate = entry_invalidate +}; + +struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void) +{ + return &aarch64_mode; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c new file mode 100644 index 00000000000000..f080fdc0be8865 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c @@ -0,0 +1,199 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include "mali_kbase.h" +#include "mali_midg_regmap.h" +#include "mali_kbase_defs.h" + +#define ENTRY_TYPE_MASK 3ULL +#define ENTRY_IS_ATE 1ULL +#define ENTRY_IS_INVAL 2ULL +#define ENTRY_IS_PTE 3ULL + +#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */ +#define ENTRY_RD_BIT (1ULL << 6) +#define ENTRY_WR_BIT (1ULL << 7) +#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */ +#define ENTRY_ACCESS_BIT (1ULL << 10) +#define ENTRY_NX_BIT (1ULL << 54) + +#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \ + ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT) + +/* Helper Function to perform assignment of page table entries, to + * ensure the use of strd, which is required on LPAE systems. + */ +static inline void page_table_entry_set(u64 *pte, u64 phy) +{ +#ifdef CONFIG_64BIT + *pte = phy; +#elif defined(CONFIG_ARM) + /* + * In order to prevent the compiler keeping cached copies of + * memory, we have to explicitly say that we have updated + * memory. + * + * Note: We could manually move the data ourselves into R0 and + * R1 by specifying register variables that are explicitly + * given registers assignments, the down side of this is that + * we have to assume cpu endianness. 
To avoid this we can use + * the ldrd to read the data from memory into R0 and R1 which + * will respect the cpu endianness, we then use strd to make + * the 64 bit assignment to the page table entry. + */ + asm volatile("ldrd r0, r1, [%[ptemp]]\n\t" + "strd r0, r1, [%[pte]]\n\t" + : "=m" (*pte) + : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy) + : "r0", "r1"); +#else +#error "64-bit atomic write must be implemented for your architecture" +#endif +} + +static void mmu_get_as_setup(struct kbase_context *kctx, + struct kbase_mmu_setup * const setup) +{ + /* Set up the required caching policies at the correct indices + * in the memattr register. */ + setup->memattr = + (AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY << + (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) | + (AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL << + (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) | + (AS_MEMATTR_LPAE_WRITE_ALLOC << + (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) | + (AS_MEMATTR_LPAE_OUTER_IMPL_DEF << + (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) | + (AS_MEMATTR_LPAE_OUTER_WA << + (AS_MEMATTR_INDEX_OUTER_WA * 8)) | + 0; /* The other indices are unused for now */ + + setup->transtab = ((u64)kctx->pgd & + ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) | + AS_TRANSTAB_LPAE_ADRMODE_TABLE | + AS_TRANSTAB_LPAE_READ_INNER; + + setup->transcfg = 0; +} + +static void mmu_update(struct kbase_context *kctx) +{ + struct kbase_device * const kbdev = kctx->kbdev; + struct kbase_as * const as = &kbdev->as[kctx->as_nr]; + struct kbase_mmu_setup * const current_setup = &as->current_setup; + + mmu_get_as_setup(kctx, current_setup); + + /* Apply the address space setting */ + kbase_mmu_hw_configure(kbdev, as, kctx); +} + +static void mmu_disable_as(struct kbase_device *kbdev, int as_nr) +{ + struct kbase_as * const as = &kbdev->as[as_nr]; + struct kbase_mmu_setup * const current_setup = &as->current_setup; + + current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED; + + /* Apply the address space setting */ + kbase_mmu_hw_configure(kbdev, as, NULL); +} + +static phys_addr_t pte_to_phy_addr(u64 entry) +{ + if (!(entry & 1)) + return 0; + + return entry & ~0xFFF; +} + +static int ate_is_valid(u64 ate, unsigned int level) +{ + return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE); +} + +static int pte_is_valid(u64 pte, unsigned int level) +{ + return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE); +} + +/* + * Map KBASE_REG flags to MMU flags + */ +static u64 get_mmu_flags(unsigned long flags) +{ + u64 mmu_flags; + + /* store mem_attr index as 4:2 (macro called ensures 3 bits already) */ + mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2; + + /* write perm if requested */ + mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0; + /* read perm if requested */ + mmu_flags |= (flags & KBASE_REG_GPU_RD) ? ENTRY_RD_BIT : 0; + /* nx if requested */ + mmu_flags |= (flags & KBASE_REG_GPU_NX) ? 
ENTRY_NX_BIT : 0; + + if (flags & KBASE_REG_SHARE_BOTH) { + /* inner and outer shareable */ + mmu_flags |= SHARE_BOTH_BITS; + } else if (flags & KBASE_REG_SHARE_IN) { + /* inner shareable coherency */ + mmu_flags |= SHARE_INNER_BITS; + } + + return mmu_flags; +} + +static void entry_set_ate(u64 *entry, + struct tagged_addr phy, + unsigned long flags, + unsigned int level) +{ + page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) | + ENTRY_IS_ATE); +} + +static void entry_set_pte(u64 *entry, phys_addr_t phy) +{ + page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE); +} + +static void entry_invalidate(u64 *entry) +{ + page_table_entry_set(entry, ENTRY_IS_INVAL); +} + +static struct kbase_mmu_mode const lpae_mode = { + .update = mmu_update, + .get_as_setup = mmu_get_as_setup, + .disable_as = mmu_disable_as, + .pte_to_phy_addr = pte_to_phy_addr, + .ate_is_valid = ate_is_valid, + .pte_is_valid = pte_is_valid, + .entry_set_ate = entry_set_ate, + .entry_set_pte = entry_set_pte, + .entry_invalidate = entry_invalidate +}; + +struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void) +{ + return &lpae_mode; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c new file mode 100644 index 00000000000000..0152b35f711b4b --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c @@ -0,0 +1,119 @@ +/* + * + * (C) COPYRIGHT 2011-2014, 2016-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include +#include + + +/* + * This file is included only for type definitions and functions belonging to + * specific platform folders. Do not add dependencies with symbols that are + * defined somewhere else. + */ +#include + +#define PLATFORM_CONFIG_RESOURCE_COUNT 4 +#define PLATFORM_CONFIG_IRQ_RES_COUNT 3 + +static struct platform_device *mali_device; + +#ifndef CONFIG_OF +/** + * @brief Convert data in struct kbase_io_resources struct to Linux-specific resources + * + * Function converts data in struct kbase_io_resources struct to an array of Linux resource structures. Note that function + * assumes that size of linux_resource array is at least PLATFORM_CONFIG_RESOURCE_COUNT. + * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ. 
+ * + * @param[in] io_resource Input IO resource data + * @param[out] linux_resources Pointer to output array of Linux resource structures + */ +static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources) +{ + if (!io_resources || !linux_resources) { + pr_err("%s: couldn't find proper resources\n", __func__); + return; + } + + memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource)); + + linux_resources[0].start = io_resources->io_memory_region.start; + linux_resources[0].end = io_resources->io_memory_region.end; + linux_resources[0].flags = IORESOURCE_MEM; + + linux_resources[1].start = io_resources->job_irq_number; + linux_resources[1].end = io_resources->job_irq_number; + linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; + + linux_resources[2].start = io_resources->mmu_irq_number; + linux_resources[2].end = io_resources->mmu_irq_number; + linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; + + linux_resources[3].start = io_resources->gpu_irq_number; + linux_resources[3].end = io_resources->gpu_irq_number; + linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL; +} +#endif /* CONFIG_OF */ + +int kbase_platform_register(void) +{ + struct kbase_platform_config *config; +#ifndef CONFIG_OF + struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT]; +#endif + int err; + + config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */ + if (config == NULL) { + pr_err("%s: couldn't get platform config\n", __func__); + return -ENODEV; + } + + mali_device = platform_device_alloc("mali", 0); + if (mali_device == NULL) + return -ENOMEM; + +#ifndef CONFIG_OF + kbasep_config_parse_io_resources(config->io_resources, resources); + err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT); + if (err) { + platform_device_put(mali_device); + mali_device = NULL; + return err; + } +#endif /* CONFIG_OF */ + + err = platform_device_add(mali_device); + if (err) { + platform_device_unregister(mali_device); + mali_device = NULL; + return err; + } + + return 0; +} +EXPORT_SYMBOL(kbase_platform_register); + +void kbase_platform_unregister(void) +{ + if (mali_device) + platform_device_unregister(mali_device); +} +EXPORT_SYMBOL(kbase_platform_unregister); diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/midgard/mali_kbase_pm.c new file mode 100644 index 00000000000000..97d543464c28bb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_pm.c @@ -0,0 +1,205 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +/** + * @file mali_kbase_pm.c + * Base kernel power management APIs + */ + +#include +#include +#include + +#include + +int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags) +{ + return kbase_hwaccess_pm_powerup(kbdev, flags); +} + +void kbase_pm_halt(struct kbase_device *kbdev) +{ + kbase_hwaccess_pm_halt(kbdev); +} + +void kbase_pm_context_active(struct kbase_device *kbdev) +{ + (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE); +} + +int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + int c; + int old_count; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + /* Trace timeline information about how long it took to handle the decision + * to powerup. Sometimes the event might be missed due to reading the count + * outside of mutex, but this is necessary to get the trace timing + * correct. */ + old_count = kbdev->pm.active_count; + if (old_count == 0) + kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + if (kbase_pm_is_suspending(kbdev)) { + switch (suspend_handler) { + case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE: + if (kbdev->pm.active_count != 0) + break; + /* FALLTHROUGH */ + case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE: + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + if (old_count == 0) + kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); + return 1; + + case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE: + /* FALLTHROUGH */ + default: + KBASE_DEBUG_ASSERT_MSG(false, "unreachable"); + break; + } + } + c = ++kbdev->pm.active_count; + KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c); + KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c); + + /* Trace the event being handled */ + if (old_count == 0) + kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE); + + if (c == 1) + /* First context active: Power on the GPU and any cores requested by + * the policy */ + kbase_hwaccess_pm_gpu_active(kbdev); + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); + + return 0; +} + +KBASE_EXPORT_TEST_API(kbase_pm_context_active); + +void kbase_pm_context_idle(struct kbase_device *kbdev) +{ + struct kbasep_js_device_data *js_devdata = &kbdev->js_data; + int c; + int old_count; + + KBASE_DEBUG_ASSERT(kbdev != NULL); + + /* Trace timeline information about how long it took to handle the decision + * to powerdown. Sometimes the event might be missed due to reading the + * count outside of mutex, but this is necessary to get the trace timing + * correct. */ + old_count = kbdev->pm.active_count; + if (old_count == 0) + kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); + + mutex_lock(&js_devdata->runpool_mutex); + mutex_lock(&kbdev->pm.lock); + + c = --kbdev->pm.active_count; + KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c); + KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c); + + KBASE_DEBUG_ASSERT(c >= 0); + + /* Trace the event being handled */ + if (old_count == 0) + kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); + + if (c == 0) { + /* Last context has gone idle */ + kbase_hwaccess_pm_gpu_idle(kbdev); + + /* Wake up anyone waiting for this to become 0 (e.g. suspend). 
The + * waiters must synchronize with us by locking the pm.lock after + * waiting */ + wake_up(&kbdev->pm.zero_active_count_wait); + } + + mutex_unlock(&kbdev->pm.lock); + mutex_unlock(&js_devdata->runpool_mutex); +} + +KBASE_EXPORT_TEST_API(kbase_pm_context_idle); + +void kbase_pm_suspend(struct kbase_device *kbdev) +{ + KBASE_DEBUG_ASSERT(kbdev); + + /* Suspend vinstr. + * This call will block until vinstr is suspended. */ + kbase_vinstr_suspend(kbdev->vinstr_ctx); + + mutex_lock(&kbdev->pm.lock); + KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev)); + kbdev->pm.suspending = true; + mutex_unlock(&kbdev->pm.lock); + + /* From now on, the active count will drop towards zero. Sometimes, it'll + * go up briefly before going down again. However, once it reaches zero it + * will stay there - guaranteeing that we've idled all pm references */ + + /* Suspend job scheduler and associated components, so that it releases all + * the PM active count references */ + kbasep_js_suspend(kbdev); + + /* Wait for the active count to reach zero. This is not the same as + * waiting for a power down, since not all policies power down when this + * reaches zero. */ + wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0); + + /* NOTE: We synchronize with anything that was just finishing a + * kbase_pm_context_idle() call by locking the pm.lock below */ + + kbase_hwaccess_pm_suspend(kbdev); +} + +void kbase_pm_resume(struct kbase_device *kbdev) +{ + /* MUST happen before any pm_context_active calls occur */ + kbase_hwaccess_pm_resume(kbdev); + + /* Initial active call, to power on the GPU/cores if needed */ + kbase_pm_context_active(kbdev); + + /* Resume any blocked atoms (which may cause contexts to be scheduled in + * and dependent atoms to run) */ + kbase_resume_suspended_soft_jobs(kbdev); + + /* Resume the Job Scheduler and associated components, and start running + * atoms */ + kbasep_js_resume(kbdev); + + /* Matching idle call, to power off the GPU/cores if we didn't actually + * need it and the policy doesn't want it on */ + kbase_pm_context_idle(kbdev); + + /* Resume vinstr operation */ + kbase_vinstr_resume(kbdev->vinstr_ctx); +} + diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.h b/drivers/gpu/arm/midgard/mali_kbase_pm.h new file mode 100644 index 00000000000000..37fa2479df74ba --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_pm.h @@ -0,0 +1,171 @@ +/* + * + * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_kbase_pm.h + * Power management API definitions + */ + +#ifndef _KBASE_PM_H_ +#define _KBASE_PM_H_ + +#include "mali_kbase_hwaccess_pm.h" + +#define PM_ENABLE_IRQS 0x01 +#define PM_HW_ISSUES_DETECT 0x02 + + +/** Initialize the power management framework. + * + * Must be called before any other power management function + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + * + * @return 0 if the power management framework was successfully initialized. 
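+ *
+ * Rough lifecycle sketch implied by the comments in this header (error
+ * handling omitted):
+ *
+ *   err = kbase_pm_init(kbdev);
+ *   ... initialize other modules and install interrupt handlers ...
+ *   err = kbase_pm_powerup(kbdev, flags);
+ *   ...
+ *   kbase_pm_halt(kbdev);
+ *   kbase_pm_term(kbdev);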
+ */ +int kbase_pm_init(struct kbase_device *kbdev); + +/** Power up GPU after all modules have been initialized and interrupt handlers installed. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + * + * @param flags Flags to pass on to kbase_pm_init_hw + * + * @return 0 if powerup was successful. + */ +int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags); + +/** + * Halt the power management framework. + * Should ensure that no new interrupts are generated, + * but allow any currently running interrupt handlers to complete successfully. + * The GPU is forced off by the time this function returns, regardless of + * whether or not the active power policy asks for the GPU to be powered off. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_halt(struct kbase_device *kbdev); + +/** Terminate the power management framework. + * + * No power management functions may be called after this + * (except @ref kbase_pm_init) + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_term(struct kbase_device *kbdev); + +/** Increment the count of active contexts. + * + * This function should be called when a context is about to submit a job. It informs the active power policy that the + * GPU is going to be in use shortly and the policy is expected to start turning on the GPU. + * + * This function will block until the GPU is available. + * + * This function ASSERTS if a suspend is occurring/has occurred whilst this is + * in use. Use kbase_pm_context_active_unless_suspending() instead. + * + * @note a Suspend is only visible to Kernel threads; user-space threads in a + * syscall cannot witness a suspend, because they are frozen before the suspend + * begins. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_context_active(struct kbase_device *kbdev); + + +/** Handler codes for kbase_pm_context_active_handle_suspend() */ +enum kbase_pm_suspend_handler { + /** A suspend is not expected/not possible - this is the same as + * kbase_pm_context_active() */ + KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE, + /** If we're suspending, fail and don't increase the active count */ + KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE, + /** If we're suspending, succeed and allow the active count to increase iff + * it didn't go from 0->1 (i.e., we didn't re-activate the GPU). + * + * This should only be used when there is a bounded time on the activation + * (e.g. guarantee it's going to be idled very soon after) */ + KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE +}; + +/** Suspend 'safe' variant of kbase_pm_context_active() + * + * If a suspend is in progress, this allows for various different ways of + * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details. + * + * This function returns a status code indicating whether we're allowed to keep the GPU + * active during the suspend, depending on the handler code. If the status code + * indicates a failure, the caller must abort whatever operation it was + * attempting, and potentially queue it up for after the OS has resumed. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + * @param suspend_handler The handler code for how to handle a suspend that might occur + * @return zero Indicates success + * @return non-zero Indicates failure because the system is suspending or suspended. 
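+ *
+ * Usage sketch (mirrors the bus fault worker elsewhere in this patch; a
+ * non-zero return means the system is suspending and the GPU must not be
+ * touched):
+ *
+ *   if (!kbase_pm_context_active_handle_suspend(kbdev,
+ *           KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ *           ... access GPU registers ...
+ *           kbase_pm_context_idle(kbdev);
+ *   }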
+ */ +int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler); + +/** Decrement the reference count of active contexts. + * + * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power + * policy so the calling code should ensure that it does not access the GPU's registers. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_context_idle(struct kbase_device *kbdev); + +/** + * Suspend the GPU and prevent any further register accesses to it from Kernel + * threads. + * + * This is called in response to an OS suspend event, and calls into the various + * kbase components to complete the suspend. + * + * @note the mechanisms used here rely on all user-space threads being frozen + * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up + * the GPU e.g. via atom submission. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_suspend(struct kbase_device *kbdev); + +/** + * Resume the GPU, allow register accesses to it, and resume running atoms on + * the GPU. + * + * This is called in response to an OS resume event, and calls into the various + * kbase components to complete the resume. + * + * @param kbdev The kbase device structure for the device (must be a valid pointer) + */ +void kbase_pm_resume(struct kbase_device *kbdev); + +/** + * kbase_pm_vsync_callback - vsync callback + * + * @buffer_updated: 1 if a new frame was displayed, 0 otherwise + * @data: Pointer to the kbase device as returned by kbase_find_device() + * + * Callback function used to notify the power management code that a vsync has + * occurred on the display. + */ +void kbase_pm_vsync_callback(int buffer_updated, void *data); + +#endif /* _KBASE_PM_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h new file mode 100644 index 00000000000000..7fb674eded3734 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_profiling_gator_api.h @@ -0,0 +1,40 @@ +/* + * + * (C) COPYRIGHT 2010, 2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @file mali_kbase_profiling_gator_api.h + * Model interface + */ + +#ifndef _KBASE_PROFILING_GATOR_API_H_ +#define _KBASE_PROFILING_GATOR_API_H_ + +/* + * List of possible actions to be controlled by Streamline. + * The following numbers are used by gator to control + * the frame buffer dumping and s/w counter reporting. 
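+ *
+ * For example, software counter reporting could be enabled with a call such
+ * as the following (illustrative only; the real call sites live in the gator
+ * integration, not in this header):
+ *
+ *   _mali_profiling_control(SW_COUNTER_ENABLE, 1);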
+ */ +#define FBDUMP_CONTROL_ENABLE (1) +#define FBDUMP_CONTROL_RATE (2) +#define SW_COUNTER_ENABLE (3) +#define FBDUMP_CONTROL_RESIZE_FACTOR (4) +#define FBDUMP_CONTROL_MAX (5) +#define FBDUMP_CONTROL_MIN FBDUMP_CONTROL_ENABLE + +void _mali_profiling_control(u32 action, u32 value); + +#endif /* _KBASE_PROFILING_GATOR_API */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c new file mode 100644 index 00000000000000..c970650069cd23 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c @@ -0,0 +1,130 @@ +/* + * + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include "mali_kbase.h" + +#include "mali_kbase_regs_history_debugfs.h" + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) + +#include + + +static int regs_history_size_get(void *data, u64 *val) +{ + struct kbase_io_history *const h = data; + + *val = h->size; + + return 0; +} + +static int regs_history_size_set(void *data, u64 val) +{ + struct kbase_io_history *const h = data; + + return kbase_io_history_resize(h, (u16)val); +} + + +DEFINE_SIMPLE_ATTRIBUTE(regs_history_size_fops, + regs_history_size_get, + regs_history_size_set, + "%llu\n"); + + +/** + * regs_history_show - show callback for the register access history file. + * + * @sfile: The debugfs entry + * @data: Data associated with the entry + * + * This function is called to dump all recent accesses to the GPU registers. + * + * @return 0 if successfully prints data in debugfs entry file, failure + * otherwise + */ +static int regs_history_show(struct seq_file *sfile, void *data) +{ + struct kbase_io_history *const h = sfile->private; + u16 i; + size_t iters; + unsigned long flags; + + if (!h->enabled) { + seq_puts(sfile, "The register access history is disabled\n"); + goto out; + } + + spin_lock_irqsave(&h->lock, flags); + + iters = (h->size > h->count) ? h->count : h->size; + seq_printf(sfile, "Last %zu register accesses of %zu total:\n", iters, + h->count); + for (i = 0; i < iters; ++i) { + struct kbase_io_access *io = + &h->buf[(h->count - iters + i) % h->size]; + char const access = (io->addr & 1) ? 
'w' : 'r'; + + seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access, + (void *)(io->addr & ~0x1), io->value); + } + + spin_unlock_irqrestore(&h->lock, flags); + +out: + return 0; +} + + +/** + * regs_history_open - open operation for regs_history debugfs file + * + * @in: &struct inode pointer + * @file: &struct file pointer + * + * @return file descriptor + */ +static int regs_history_open(struct inode *in, struct file *file) +{ + return single_open(file, &regs_history_show, in->i_private); +} + + +static const struct file_operations regs_history_fops = { + .open = &regs_history_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev) +{ + debugfs_create_bool("regs_history_enabled", S_IRUGO | S_IWUSR, + kbdev->mali_debugfs_directory, + &kbdev->io_history.enabled); + debugfs_create_file("regs_history_size", S_IRUGO | S_IWUSR, + kbdev->mali_debugfs_directory, + &kbdev->io_history, &regs_history_size_fops); + debugfs_create_file("regs_history", S_IRUGO, + kbdev->mali_debugfs_directory, &kbdev->io_history, + &regs_history_fops); +} + + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h new file mode 100644 index 00000000000000..f1083700233094 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h @@ -0,0 +1,50 @@ +/* + * + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * Header file for register access history support via debugfs + * + * This interface is made available via /sys/kernel/debug/mali#/regs_history*. + * + * Usage: + * - regs_history_enabled: whether recording of register accesses is enabled. + * Write 'y' to enable, 'n' to disable. + * - regs_history_size: size of the register history buffer, must be > 0 + * - regs_history: return the information about last accesses to the registers. + */ + +#ifndef _KBASE_REGS_HISTORY_DEBUGFS_H +#define _KBASE_REGS_HISTORY_DEBUGFS_H + +struct kbase_device; + +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI) + +/** + * kbasep_regs_history_debugfs_init - add debugfs entries for register history + * + * @kbdev: Pointer to kbase_device containing the register history + */ +void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev); + +#else /* CONFIG_DEBUG_FS */ + +#define kbasep_regs_history_debugfs_init CSTD_NOP + +#endif /* CONFIG_DEBUG_FS */ + +#endif /*_KBASE_REGS_HISTORY_DEBUGFS_H*/ diff --git a/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/midgard/mali_kbase_replay.c new file mode 100644 index 00000000000000..2f8eccfc1757a7 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_replay.c @@ -0,0 +1,1166 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @file mali_kbase_replay.c + * Replay soft job handlers + */ + +#include +#include +#include +#include +#include + +#define JOB_NOT_STARTED 0 +#define JOB_TYPE_NULL (1) +#define JOB_TYPE_VERTEX (5) +#define JOB_TYPE_TILER (7) +#define JOB_TYPE_FUSED (8) +#define JOB_TYPE_FRAGMENT (9) + +#define JOB_HEADER_32_FBD_OFFSET (31*4) +#define JOB_HEADER_64_FBD_OFFSET (44*4) + +#define FBD_POINTER_MASK (~0x3f) + +#define SFBD_TILER_OFFSET (48*4) + +#define MFBD_TILER_OFFSET (14*4) + +#define FBD_HIERARCHY_WEIGHTS 8 +#define FBD_HIERARCHY_MASK_MASK 0x1fff + +#define FBD_TYPE 1 + +#define HIERARCHY_WEIGHTS 13 + +#define JOB_HEADER_ID_MAX 0xffff + +#define JOB_SOURCE_ID(status) (((status) >> 16) & 0xFFFF) +#define JOB_POLYGON_LIST (0x03) + +struct fragment_job { + struct job_descriptor_header header; + + u32 x[2]; + union { + u64 _64; + u32 _32; + } fragment_fbd; +}; + +static void dump_job_head(struct kbase_context *kctx, char *head_str, + struct job_descriptor_header *job) +{ +#ifdef CONFIG_MALI_DEBUG + dev_dbg(kctx->kbdev->dev, "%s\n", head_str); + dev_dbg(kctx->kbdev->dev, + "addr = %p\n" + "exception_status = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n" + "first_incomplete_task = %x\n" + "fault_pointer = %llx\n" + "job_descriptor_size = %x\n" + "job_type = %x\n" + "job_barrier = %x\n" + "_reserved_01 = %x\n" + "_reserved_02 = %x\n" + "_reserved_03 = %x\n" + "_reserved_04/05 = %x,%x\n" + "job_index = %x\n" + "dependencies = %x,%x\n", + job, job->exception_status, + JOB_SOURCE_ID(job->exception_status), + (job->exception_status >> 8) & 0x3, + job->exception_status & 0xFF, + job->first_incomplete_task, + job->fault_pointer, job->job_descriptor_size, + job->job_type, job->job_barrier, job->_reserved_01, + job->_reserved_02, job->_reserved_03, + job->_reserved_04, job->_reserved_05, + job->job_index, + job->job_dependency_index_1, + job->job_dependency_index_2); + + if (job->job_descriptor_size) + dev_dbg(kctx->kbdev->dev, "next = %llx\n", + job->next_job._64); + else + dev_dbg(kctx->kbdev->dev, "next = %x\n", + job->next_job._32); +#endif +} + +static int kbasep_replay_reset_sfbd(struct kbase_context *kctx, + u64 fbd_address, u64 tiler_heap_free, + u16 hierarchy_mask, u32 default_weight) +{ + struct { + u32 padding_1[1]; + u32 flags; + u64 padding_2[2]; + u64 heap_free_address; + u32 padding[8]; + u32 weights[FBD_HIERARCHY_WEIGHTS]; + } *fbd_tiler; + struct kbase_vmap_struct map; + + dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address); + + fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET, + sizeof(*fbd_tiler), &map); + if (!fbd_tiler) { + dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n"); + return -EINVAL; + } + +#ifdef CONFIG_MALI_DEBUG + dev_dbg(kctx->kbdev->dev, + "FBD tiler:\n" + "flags = %x\n" + "heap_free_address = %llx\n", + fbd_tiler->flags, fbd_tiler->heap_free_address); +#endif + if (hierarchy_mask) { + u32 weights[HIERARCHY_WEIGHTS]; + u16 old_hierarchy_mask = fbd_tiler->flags & + FBD_HIERARCHY_MASK_MASK; + int i, j = 0; + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) { + if 
(old_hierarchy_mask & (1 << i)) { + KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); + weights[i] = fbd_tiler->weights[j++]; + } else { + weights[i] = default_weight; + } + } + + + dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n", + old_hierarchy_mask, hierarchy_mask); + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) + dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n", + i, weights[i]); + + j = 0; + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) { + if (hierarchy_mask & (1 << i)) { + KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); + + dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n", + i, weights[i], j); + + fbd_tiler->weights[j++] = weights[i]; + } + } + + for (; j < FBD_HIERARCHY_WEIGHTS; j++) + fbd_tiler->weights[j] = 0; + + fbd_tiler->flags = hierarchy_mask | (1 << 16); + } + + fbd_tiler->heap_free_address = tiler_heap_free; + + dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n", + fbd_tiler->heap_free_address, fbd_tiler->flags); + + kbase_vunmap(kctx, &map); + + return 0; +} + +static int kbasep_replay_reset_mfbd(struct kbase_context *kctx, + u64 fbd_address, u64 tiler_heap_free, + u16 hierarchy_mask, u32 default_weight) +{ + struct kbase_vmap_struct map; + struct { + u32 padding_0; + u32 flags; + u64 padding_1[2]; + u64 heap_free_address; + u64 padding_2; + u32 weights[FBD_HIERARCHY_WEIGHTS]; + } *fbd_tiler; + + dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address); + + fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET, + sizeof(*fbd_tiler), &map); + if (!fbd_tiler) { + dev_err(kctx->kbdev->dev, + "kbasep_replay_reset_fbd: failed to map fbd\n"); + return -EINVAL; + } + +#ifdef CONFIG_MALI_DEBUG + dev_dbg(kctx->kbdev->dev, "FBD tiler:\n" + "flags = %x\n" + "heap_free_address = %llx\n", + fbd_tiler->flags, + fbd_tiler->heap_free_address); +#endif + if (hierarchy_mask) { + u32 weights[HIERARCHY_WEIGHTS]; + u16 old_hierarchy_mask = (fbd_tiler->flags) & + FBD_HIERARCHY_MASK_MASK; + int i, j = 0; + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) { + if (old_hierarchy_mask & (1 << i)) { + KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); + weights[i] = fbd_tiler->weights[j++]; + } else { + weights[i] = default_weight; + } + } + + + dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n", + old_hierarchy_mask, hierarchy_mask); + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) + dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n", + i, weights[i]); + + j = 0; + + for (i = 0; i < HIERARCHY_WEIGHTS; i++) { + if (hierarchy_mask & (1 << i)) { + KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS); + + dev_dbg(kctx->kbdev->dev, + " Writing hierarchy level %02d (%08x) to %d\n", + i, weights[i], j); + + fbd_tiler->weights[j++] = weights[i]; + } + } + + for (; j < FBD_HIERARCHY_WEIGHTS; j++) + fbd_tiler->weights[j] = 0; + + fbd_tiler->flags = hierarchy_mask | (1 << 16); + } + + fbd_tiler->heap_free_address = tiler_heap_free; + + kbase_vunmap(kctx, &map); + + return 0; +} + +/** + * @brief Reset the status of an FBD pointed to by a tiler job + * + * This performs two functions : + * - Set the hierarchy mask + * - Reset the tiler free heap address + * + * @param[in] kctx Context pointer + * @param[in] job_header Address of job header to reset. 
+ * @param[in] tiler_heap_free The value to reset Tiler Heap Free to + * @param[in] hierarchy_mask The hierarchy mask to use + * @param[in] default_weight Default hierarchy weight to write when no other + * weight is given in the FBD + * @param[in] job_64 true if this job is using 64-bit + * descriptors + * + * @return 0 on success, error code on failure + */ +static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx, + u64 job_header, u64 tiler_heap_free, + u16 hierarchy_mask, u32 default_weight, bool job_64) +{ + struct kbase_vmap_struct map; + u64 fbd_address; + + if (job_64) { + u64 *job_ext; + + job_ext = kbase_vmap(kctx, + job_header + JOB_HEADER_64_FBD_OFFSET, + sizeof(*job_ext), &map); + + if (!job_ext) { + dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n"); + return -EINVAL; + } + + fbd_address = *job_ext; + + kbase_vunmap(kctx, &map); + } else { + u32 *job_ext; + + job_ext = kbase_vmap(kctx, + job_header + JOB_HEADER_32_FBD_OFFSET, + sizeof(*job_ext), &map); + + if (!job_ext) { + dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n"); + return -EINVAL; + } + + fbd_address = *job_ext; + + kbase_vunmap(kctx, &map); + } + + if (fbd_address & FBD_TYPE) { + return kbasep_replay_reset_mfbd(kctx, + fbd_address & FBD_POINTER_MASK, + tiler_heap_free, + hierarchy_mask, + default_weight); + } else { + return kbasep_replay_reset_sfbd(kctx, + fbd_address & FBD_POINTER_MASK, + tiler_heap_free, + hierarchy_mask, + default_weight); + } +} + +/** + * @brief Reset the status of a job + * + * This performs the following functions : + * + * - Reset the Job Status field of each job to NOT_STARTED. + * - Set the Job Type field of any Vertex Jobs to Null Job. + * - For any jobs using an FBD, set the Tiler Heap Free field to the value of + * the tiler_heap_free parameter, and set the hierarchy level mask to the + * hier_mask parameter. + * - Offset HW dependencies by the hw_job_id_offset parameter + * - Set the Perform Job Barrier flag if this job is the first in the chain + * - Read the address of the next job header + * + * @param[in] kctx Context pointer + * @param[in,out] job_header Address of job header to reset. Set to address + * of next job header on exit. + * @param[in] prev_jc Previous job chain to link to, if this job is + * the last in the chain. 
+ * @param[in] hw_job_id_offset Offset for HW job IDs + * @param[in] tiler_heap_free The value to reset Tiler Heap Free to + * @param[in] hierarchy_mask The hierarchy mask to use + * @param[in] default_weight Default hierarchy weight to write when no other + * weight is given in the FBD + * @param[in] first_in_chain true if this job is the first in the chain + * @param[in] fragment_chain true if this job is in the fragment chain + * + * @return 0 on success, error code on failure + */ +static int kbasep_replay_reset_job(struct kbase_context *kctx, + u64 *job_header, u64 prev_jc, + u64 tiler_heap_free, u16 hierarchy_mask, + u32 default_weight, u16 hw_job_id_offset, + bool first_in_chain, bool fragment_chain) +{ + struct fragment_job *frag_job; + struct job_descriptor_header *job; + u64 new_job_header; + struct kbase_vmap_struct map; + + frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map); + if (!frag_job) { + dev_err(kctx->kbdev->dev, + "kbasep_replay_parse_jc: failed to map jc\n"); + return -EINVAL; + } + job = &frag_job->header; + + dump_job_head(kctx, "Job header:", job); + + if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) { + dev_err(kctx->kbdev->dev, "Job already not started\n"); + goto out_unmap; + } + job->exception_status = JOB_NOT_STARTED; + + if (job->job_type == JOB_TYPE_VERTEX) + job->job_type = JOB_TYPE_NULL; + + if (job->job_type == JOB_TYPE_FUSED) { + dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n"); + goto out_unmap; + } + + if (first_in_chain) + job->job_barrier = 1; + + if ((job->job_dependency_index_1 + hw_job_id_offset) > + JOB_HEADER_ID_MAX || + (job->job_dependency_index_2 + hw_job_id_offset) > + JOB_HEADER_ID_MAX || + (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) { + dev_err(kctx->kbdev->dev, + "Job indicies/dependencies out of valid range\n"); + goto out_unmap; + } + + if (job->job_dependency_index_1) + job->job_dependency_index_1 += hw_job_id_offset; + if (job->job_dependency_index_2) + job->job_dependency_index_2 += hw_job_id_offset; + + job->job_index += hw_job_id_offset; + + if (job->job_descriptor_size) { + new_job_header = job->next_job._64; + if (!job->next_job._64) + job->next_job._64 = prev_jc; + } else { + new_job_header = job->next_job._32; + if (!job->next_job._32) + job->next_job._32 = prev_jc; + } + dump_job_head(kctx, "Updated to:", job); + + if (job->job_type == JOB_TYPE_TILER) { + bool job_64 = job->job_descriptor_size != 0; + + if (kbasep_replay_reset_tiler_job(kctx, *job_header, + tiler_heap_free, hierarchy_mask, + default_weight, job_64) != 0) + goto out_unmap; + + } else if (job->job_type == JOB_TYPE_FRAGMENT) { + u64 fbd_address; + + if (job->job_descriptor_size) + fbd_address = frag_job->fragment_fbd._64; + else + fbd_address = (u64)frag_job->fragment_fbd._32; + + if (fbd_address & FBD_TYPE) { + if (kbasep_replay_reset_mfbd(kctx, + fbd_address & FBD_POINTER_MASK, + tiler_heap_free, + hierarchy_mask, + default_weight) != 0) + goto out_unmap; + } else { + if (kbasep_replay_reset_sfbd(kctx, + fbd_address & FBD_POINTER_MASK, + tiler_heap_free, + hierarchy_mask, + default_weight) != 0) + goto out_unmap; + } + } + + kbase_vunmap(kctx, &map); + + *job_header = new_job_header; + + return 0; + +out_unmap: + kbase_vunmap(kctx, &map); + return -EINVAL; +} + +/** + * @brief Find the highest job ID in a job chain + * + * @param[in] kctx Context pointer + * @param[in] jc Job chain start address + * @param[out] hw_job_id Highest job ID in chain + * + * @return 0 on success, error code on failure + */ 
+static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx, + u64 jc, u16 *hw_job_id) +{ + while (jc) { + struct job_descriptor_header *job; + struct kbase_vmap_struct map; + + dev_dbg(kctx->kbdev->dev, + "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc); + + job = kbase_vmap(kctx, jc, sizeof(*job), &map); + if (!job) { + dev_err(kctx->kbdev->dev, "failed to map jc\n"); + + return -EINVAL; + } + + if (job->job_index > *hw_job_id) + *hw_job_id = job->job_index; + + if (job->job_descriptor_size) + jc = job->next_job._64; + else + jc = job->next_job._32; + + kbase_vunmap(kctx, &map); + } + + return 0; +} + +/** + * @brief Reset the status of a number of jobs + * + * This function walks the provided job chain, and calls + * kbasep_replay_reset_job for each job. It also links the job chain to the + * provided previous job chain. + * + * The function will fail if any of the jobs passed already have status of + * NOT_STARTED. + * + * @param[in] kctx Context pointer + * @param[in] jc Job chain to be processed + * @param[in] prev_jc Job chain to be added to. May be NULL + * @param[in] tiler_heap_free The value to reset Tiler Heap Free to + * @param[in] hierarchy_mask The hierarchy mask to use + * @param[in] default_weight Default hierarchy weight to write when no other + * weight is given in the FBD + * @param[in] hw_job_id_offset Offset for HW job IDs + * @param[in] fragment_chain true if this chain is the fragment chain + * + * @return 0 on success, error code otherwise + */ +static int kbasep_replay_parse_jc(struct kbase_context *kctx, + u64 jc, u64 prev_jc, + u64 tiler_heap_free, u16 hierarchy_mask, + u32 default_weight, u16 hw_job_id_offset, + bool fragment_chain) +{ + bool first_in_chain = true; + int nr_jobs = 0; + + dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n", + jc, hw_job_id_offset); + + while (jc) { + dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc); + + if (kbasep_replay_reset_job(kctx, &jc, prev_jc, + tiler_heap_free, hierarchy_mask, + default_weight, hw_job_id_offset, + first_in_chain, fragment_chain) != 0) + return -EINVAL; + + first_in_chain = false; + + nr_jobs++; + if (fragment_chain && + nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) { + dev_err(kctx->kbdev->dev, + "Exceeded maximum number of jobs in fragment chain\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * @brief Reset the status of a replay job, and set up dependencies + * + * This performs the actions to allow the replay job to be re-run following + * completion of the passed dependency. + * + * @param[in] katom The atom to be reset + * @param[in] dep_atom The dependency to be attached to the atom + */ +static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom, + struct kbase_jd_atom *dep_atom) +{ + katom->status = KBASE_JD_ATOM_STATE_QUEUED; + kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA); + list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]); +} + +/** + * @brief Allocate an unused katom + * + * This will search the provided context for an unused katom, and will mark it + * as KBASE_JD_ATOM_STATE_QUEUED. + * + * If no atoms are available then the function will fail. 
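+ * (Editor's note, not part of the original patch: the scan below walks the
+ * atom table downwards from BASE_JD_ATOM_COUNT-1 and stops before index 0,
+ * so atom number 0 is never handed out here - presumably because an atom
+ * number of 0 is reserved for "no dependency" in the dependency slots.)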
+ * + * @param[in] kctx Context pointer + * @return An atom ID, or -1 on failure + */ +static int kbasep_allocate_katom(struct kbase_context *kctx) +{ + struct kbase_jd_context *jctx = &kctx->jctx; + int i; + + for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) { + if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) { + jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED; + dev_dbg(kctx->kbdev->dev, + "kbasep_allocate_katom: Allocated atom %d\n", + i); + return i; + } + } + + return -1; +} + +/** + * @brief Release a katom + * + * This will mark the provided atom as available, and remove any dependencies. + * + * For use on error path. + * + * @param[in] kctx Context pointer + * @param[in] atom_id ID of atom to release + */ +static void kbasep_release_katom(struct kbase_context *kctx, int atom_id) +{ + struct kbase_jd_context *jctx = &kctx->jctx; + + dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n", + atom_id); + + while (!list_empty(&jctx->atoms[atom_id].dep_head[0])) + list_del(jctx->atoms[atom_id].dep_head[0].next); + + while (!list_empty(&jctx->atoms[atom_id].dep_head[1])) + list_del(jctx->atoms[atom_id].dep_head[1].next); + + jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED; +} + +static void kbasep_replay_create_atom(struct kbase_context *kctx, + struct base_jd_atom_v2 *atom, + int atom_nr, + base_jd_prio prio) +{ + atom->nr_extres = 0; + atom->extres_list = 0; + atom->device_nr = 0; + atom->prio = prio; + atom->atom_number = atom_nr; + + base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID); + base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID); + + atom->udata.blob[0] = 0; + atom->udata.blob[1] = 0; +} + +/** + * @brief Create two atoms for the purpose of replaying jobs + * + * Two atoms are allocated and created. The jc pointer is not set at this + * stage. The second atom has a dependency on the first. The remaining fields + * are set up as follows : + * + * - No external resources. Any required external resources will be held by the + * replay atom. + * - device_nr is set to 0. This is not relevant as + * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set. + * - Priority is inherited from the replay job. 
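+ * (Editor's note, not part of the original patch: the jc pointers of both
+ * atoms are deliberately left unset here; kbasep_replay_parse_payload()
+ * fills them in while parsing the tiler and fragment job chains.)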
+ * + * @param[out] t_atom Atom to use for tiler jobs + * @param[out] f_atom Atom to use for fragment jobs + * @param[in] prio Priority of new atom (inherited from replay soft + * job) + * @return 0 on success, error code on failure + */ +static int kbasep_replay_create_atoms(struct kbase_context *kctx, + struct base_jd_atom_v2 *t_atom, + struct base_jd_atom_v2 *f_atom, + base_jd_prio prio) +{ + int t_atom_nr, f_atom_nr; + + t_atom_nr = kbasep_allocate_katom(kctx); + if (t_atom_nr < 0) { + dev_err(kctx->kbdev->dev, "Failed to allocate katom\n"); + return -EINVAL; + } + + f_atom_nr = kbasep_allocate_katom(kctx); + if (f_atom_nr < 0) { + dev_err(kctx->kbdev->dev, "Failed to allocate katom\n"); + kbasep_release_katom(kctx, t_atom_nr); + return -EINVAL; + } + + kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio); + kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio); + + base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA); + + return 0; +} + +#ifdef CONFIG_MALI_DEBUG +static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload) +{ + u64 next; + + dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n"); + next = payload->tiler_jc_list; + + while (next) { + struct kbase_vmap_struct map; + base_jd_replay_jc *jc_struct; + + jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map); + + if (!jc_struct) + return; + + dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n", + jc_struct, jc_struct->jc, jc_struct->next); + + next = jc_struct->next; + + kbase_vunmap(kctx, &map); + } +} +#endif + +/** + * @brief Parse a base_jd_replay_payload provided by userspace + * + * This will read the payload from userspace, and parse the job chains. + * + * @param[in] kctx Context pointer + * @param[in] replay_atom Replay soft job atom + * @param[in] t_atom Atom to use for tiler jobs + * @param[in] f_atom Atom to use for fragment jobs + * @return 0 on success, error code on failure + */ +static int kbasep_replay_parse_payload(struct kbase_context *kctx, + struct kbase_jd_atom *replay_atom, + struct base_jd_atom_v2 *t_atom, + struct base_jd_atom_v2 *f_atom) +{ + base_jd_replay_payload *payload = NULL; + u64 next; + u64 prev_jc = 0; + u16 hw_job_id_offset = 0; + int ret = -EINVAL; + struct kbase_vmap_struct map; + + dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n", + replay_atom->jc, sizeof(payload)); + + payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map); + if (!payload) { + dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n"); + return -EINVAL; + } + +#ifdef BASE_LEGACY_UK10_2_SUPPORT + if (KBASE_API_VERSION(10, 3) > replay_atom->kctx->api_version) { + base_jd_replay_payload_uk10_2 *payload_uk10_2; + u16 tiler_core_req; + u16 fragment_core_req; + + payload_uk10_2 = (base_jd_replay_payload_uk10_2 *) payload; + memcpy(&tiler_core_req, &payload_uk10_2->tiler_core_req, + sizeof(tiler_core_req)); + memcpy(&fragment_core_req, &payload_uk10_2->fragment_core_req, + sizeof(fragment_core_req)); + payload->tiler_core_req = (u32)(tiler_core_req & 0x7fff); + payload->fragment_core_req = (u32)(fragment_core_req & 0x7fff); + } +#endif /* BASE_LEGACY_UK10_2_SUPPORT */ + +#ifdef CONFIG_MALI_DEBUG + dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload); + dev_dbg(kctx->kbdev->dev, "Payload structure:\n" + "tiler_jc_list = %llx\n" + "fragment_jc = %llx\n" + "tiler_heap_free = %llx\n" + "fragment_hierarchy_mask = %x\n" + 
"tiler_hierarchy_mask = %x\n" + "hierarchy_default_weight = %x\n" + "tiler_core_req = %x\n" + "fragment_core_req = %x\n", + payload->tiler_jc_list, + payload->fragment_jc, + payload->tiler_heap_free, + payload->fragment_hierarchy_mask, + payload->tiler_hierarchy_mask, + payload->hierarchy_default_weight, + payload->tiler_core_req, + payload->fragment_core_req); + payload_dump(kctx, payload); +#endif + t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER; + f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER; + + /* Sanity check core requirements*/ + if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T || + (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS || + t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES || + f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) { + + int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP; + int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP & ~BASE_JD_REQ_FS_AFBC; + int t_has_ex_res = t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES; + int f_has_ex_res = f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES; + + if (t_atom_type != BASE_JD_REQ_T) { + dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x\n Expected: 0x%x", + t_atom_type, BASE_JD_REQ_T); + } + if (f_atom_type != BASE_JD_REQ_FS) { + dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was 0x%x Expected: 0x%x\n", + f_atom_type, BASE_JD_REQ_FS); + } + if (t_has_ex_res) { + dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n"); + } + if (f_has_ex_res) { + dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n"); + } + + goto out; + } + + /* Process tiler job chains */ + next = payload->tiler_jc_list; + if (!next) { + dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n"); + goto out; + } + + while (next) { + base_jd_replay_jc *jc_struct; + struct kbase_vmap_struct jc_map; + u64 jc; + + jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map); + + if (!jc_struct) { + dev_err(kctx->kbdev->dev, "Failed to map jc struct\n"); + goto out; + } + + jc = jc_struct->jc; + next = jc_struct->next; + if (next) + jc_struct->jc = 0; + + kbase_vunmap(kctx, &jc_map); + + if (jc) { + u16 max_hw_job_id = 0; + + if (kbasep_replay_find_hw_job_id(kctx, jc, + &max_hw_job_id) != 0) + goto out; + + if (kbasep_replay_parse_jc(kctx, jc, prev_jc, + payload->tiler_heap_free, + payload->tiler_hierarchy_mask, + payload->hierarchy_default_weight, + hw_job_id_offset, false) != 0) { + goto out; + } + + hw_job_id_offset += max_hw_job_id; + + prev_jc = jc; + } + } + t_atom->jc = prev_jc; + + /* Process fragment job chain */ + f_atom->jc = payload->fragment_jc; + if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0, + payload->tiler_heap_free, + payload->fragment_hierarchy_mask, + payload->hierarchy_default_weight, 0, + true) != 0) { + goto out; + } + + if (!t_atom->jc || !f_atom->jc) { + dev_err(kctx->kbdev->dev, "Invalid payload\n"); + goto out; + } + + dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n", + t_atom->jc, f_atom->jc); + ret = 0; + +out: + kbase_vunmap(kctx, &map); + + return ret; +} + +static void kbase_replay_process_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom; + struct kbase_context *kctx; + struct kbase_jd_context *jctx; + bool need_to_try_schedule_context = false; + + struct 
base_jd_atom_v2 t_atom, f_atom; + struct kbase_jd_atom *t_katom, *f_katom; + base_jd_prio atom_prio; + + katom = container_of(data, struct kbase_jd_atom, work); + kctx = katom->kctx; + jctx = &kctx->jctx; + + mutex_lock(&jctx->lock); + + atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority); + + if (kbasep_replay_create_atoms( + kctx, &t_atom, &f_atom, atom_prio) != 0) { + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + goto out; + } + + t_katom = &jctx->atoms[t_atom.atom_number]; + f_katom = &jctx->atoms[f_atom.atom_number]; + + if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) { + kbasep_release_katom(kctx, t_atom.atom_number); + kbasep_release_katom(kctx, f_atom.atom_number); + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + goto out; + } + + kbasep_replay_reset_softjob(katom, f_katom); + + need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom); + if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) { + dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n"); + kbasep_release_katom(kctx, f_atom.atom_number); + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + goto out; + } + need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom); + if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) { + dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n"); + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + goto out; + } + + katom->event_code = BASE_JD_EVENT_DONE; + +out: + if (katom->event_code != BASE_JD_EVENT_DONE) { + kbase_disjoint_state_down(kctx->kbdev); + + need_to_try_schedule_context |= jd_done_nolock(katom, NULL); + } + + if (need_to_try_schedule_context) + kbase_js_sched_all(kctx->kbdev); + + mutex_unlock(&jctx->lock); +} + +/** + * @brief Check job replay fault + * + * This will read the job payload, checks fault type and source, then decides + * whether replay is required. + * + * @param[in] katom The atom to be processed + * @return true (success) if replay required or false on failure. + */ +static bool kbase_replay_fault_check(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct device *dev = kctx->kbdev->dev; + base_jd_replay_payload *payload; + u64 job_header; + u64 job_loop_detect; + struct job_descriptor_header *job; + struct kbase_vmap_struct job_map; + struct kbase_vmap_struct map; + bool err = false; + + /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or + * if force_replay is enabled. + */ + if (BASE_JD_EVENT_TERMINATED == katom->event_code) { + return false; + } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) { + return true; + } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) { + katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT; + return true; + } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) { + /* No replay for faults of type other than + * BASE_JD_EVENT_DATA_INVALID_FAULT. + */ + return false; + } + + /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc + * to find out whether the source of exception is POLYGON_LIST. Replay + * is required if the source of fault is POLYGON_LIST. 
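+ * (Editor's note, not part of the original patch: JOB_SOURCE_ID() extracts
+ * bits 16..31 of exception_status, so a status word of, say, 0x00030058
+ * decodes to source 0x0003 == JOB_POLYGON_LIST and qualifies for replay,
+ * while any other source leaves err as false.)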
+ */ + payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map); + if (!payload) { + dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n"); + return false; + } + +#ifdef CONFIG_MALI_DEBUG + dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload); + dev_dbg(dev, "\nPayload structure:\n" + "fragment_jc = 0x%llx\n" + "fragment_hierarchy_mask = 0x%x\n" + "fragment_core_req = 0x%x\n", + payload->fragment_jc, + payload->fragment_hierarchy_mask, + payload->fragment_core_req); +#endif + /* Process fragment job chain */ + job_header = (u64) payload->fragment_jc; + job_loop_detect = job_header; + while (job_header) { + job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map); + if (!job) { + dev_err(dev, "failed to map jc\n"); + /* unmap payload*/ + kbase_vunmap(kctx, &map); + return false; + } + + + dump_job_head(kctx, "\njob_head structure:\n", job); + + /* Replay only when the polygon list reader caused the + * DATA_INVALID_FAULT */ + if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) && + (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) { + err = true; + kbase_vunmap(kctx, &job_map); + break; + } + + /* Move on to next fragment job in the list */ + if (job->job_descriptor_size) + job_header = job->next_job._64; + else + job_header = job->next_job._32; + + kbase_vunmap(kctx, &job_map); + + /* Job chain loop detected */ + if (job_header == job_loop_detect) + break; + } + + /* unmap payload*/ + kbase_vunmap(kctx, &map); + + return err; +} + + +/** + * @brief Process a replay job + * + * Called from kbase_process_soft_job. + * + * On exit, if the job has completed, katom->event_code will have been updated. + * If the job has not completed, and is replaying jobs, then the atom status + * will have been reset to KBASE_JD_ATOM_STATE_QUEUED. + * + * @param[in] katom The atom to be processed + * @return false if the atom has completed + * true if the atom is replaying jobs + */ +bool kbase_replay_process(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct kbase_device *kbdev = kctx->kbdev; + + /* Don't replay this atom if these issues are not present in the + * hardware */ + if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) && + !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) { + dev_dbg(kbdev->dev, "Hardware does not need replay workaround"); + + /* Signal failure to userspace */ + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + + return false; + } + + if (katom->event_code == BASE_JD_EVENT_DONE) { + dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n"); + + if (katom->retry_count) + kbase_disjoint_state_down(kbdev); + + return false; + } + + if (kbase_ctx_flag(kctx, KCTX_DYING)) { + dev_dbg(kbdev->dev, "Not replaying; context is dying\n"); + + if (katom->retry_count) + kbase_disjoint_state_down(kbdev); + + return false; + } + + /* Check job exception type and source before replaying. */ + if (!kbase_replay_fault_check(katom)) { + dev_dbg(kbdev->dev, + "Replay cancelled on event %x\n", katom->event_code); + /* katom->event_code is already set to the failure code of the + * previous job. 
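+ * (Editor's note, not part of the original patch: returning false here
+ * tells kbase_process_soft_job() that the atom has completed, so the
+ * preserved failure code is what propagates to completion handling.)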
+ */ + return false; + } + + dev_warn(kbdev->dev, "Replaying jobs retry=%d\n", + katom->retry_count); + + katom->retry_count++; + + if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) { + dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n"); + + kbase_disjoint_state_down(kbdev); + + /* katom->event_code is already set to the failure code of the + previous job */ + return false; + } + + /* only enter the disjoint state once for the whole time while the replay is ongoing */ + if (katom->retry_count == 1) + kbase_disjoint_state_up(kbdev); + + INIT_WORK(&katom->work, kbase_replay_process_worker); + queue_work(kctx->event_workq, &katom->work); + + return true; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.c b/drivers/gpu/arm/midgard/mali_kbase_smc.c new file mode 100644 index 00000000000000..43175c85988fd0 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_smc.c @@ -0,0 +1,74 @@ +/* + * + * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifdef CONFIG_ARM64 + +#include +#include + +#include + +static noinline u64 invoke_smc_fid(u64 function_id, + u64 arg0, u64 arg1, u64 arg2) +{ + register u64 x0 asm("x0") = function_id; + register u64 x1 asm("x1") = arg0; + register u64 x2 asm("x2") = arg1; + register u64 x3 asm("x3") = arg2; + + asm volatile( + __asmeq("%0", "x0") + __asmeq("%1", "x1") + __asmeq("%2", "x2") + __asmeq("%3", "x3") + "smc #0\n" + : "+r" (x0) + : "r" (x1), "r" (x2), "r" (x3)); + + return x0; +} + +u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2) +{ + /* Is fast call (bit 31 set) */ + KBASE_DEBUG_ASSERT(fid & ~SMC_FAST_CALL); + /* bits 16-23 must be zero for fast calls */ + KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0); + + return invoke_smc_fid(fid, arg0, arg1, arg2); +} + +u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64, + u64 arg0, u64 arg1, u64 arg2) +{ + u32 fid = 0; + + /* Only the six bits allowed should be used. */ + KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0); + + fid |= SMC_FAST_CALL; /* Bit 31: Fast call */ + if (smc64) + fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */ + fid |= oen; /* Bit 29:24: OEN */ + /* Bit 23:16: Must be zero for fast calls */ + fid |= (function_number); /* Bit 15:0: function number */ + + return kbase_invoke_smc_fid(fid, arg0, arg1, arg2); +} + +#endif /* CONFIG_ARM64 */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.h b/drivers/gpu/arm/midgard/mali_kbase_smc.h new file mode 100644 index 00000000000000..9bff3d2e8b4d87 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_smc.h @@ -0,0 +1,67 @@ +/* + * + * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +#ifndef _KBASE_SMC_H_ +#define _KBASE_SMC_H_ + +#ifdef CONFIG_ARM64 + +#include + +#define SMC_FAST_CALL (1 << 31) +#define SMC_64 (1 << 30) + +#define SMC_OEN_OFFSET 24 +#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */ +#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET) +#define SMC_OEN_STD (4 << SMC_OEN_OFFSET) + + +/** + * kbase_invoke_smc_fid - Perform a secure monitor call + * @fid: The SMC function to call, see SMC Calling convention. + * @arg0: First argument to the SMC. + * @arg1: Second argument to the SMC. + * @arg2: Third argument to the SMC. + * + * See SMC Calling Convention for details. + * + * Return: the return value from the SMC. + */ +u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2); + +/** + * kbase_invoke_smc_fid - Perform a secure monitor call + * @oen: Owning Entity number (SIP, STD etc). + * @function_number: The function number within the OEN. + * @smc64: use SMC64 calling convention instead of SMC32. + * @arg0: First argument to the SMC. + * @arg1: Second argument to the SMC. + * @arg2: Third argument to the SMC. + * + * See SMC Calling Convention for details. + * + * Return: the return value from the SMC call. + */ +u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64, + u64 arg0, u64 arg1, u64 arg2); + +#endif /* CONFIG_ARM64 */ + +#endif /* _KBASE_SMC_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c new file mode 100644 index 00000000000000..1ffc066634825c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c @@ -0,0 +1,1556 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include + +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include +#include +#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */ +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @file mali_kbase_softjobs.c + * + * This file implements the logic behind software only jobs that are + * executed within the driver rather than being handed over to the GPU. + */ + +static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + unsigned long lflags; + + spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags); + list_add_tail(&katom->queue, &kctx->waiting_soft_jobs); + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); +} + +void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + unsigned long lflags; + + spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags); + list_del(&katom->queue); + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); +} + +static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + + /* Record the start time of this atom so we could cancel it at + * the right time. 
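+ * (Editor's note, not part of the original patch: "the right time" is
+ * enforced by kbasep_soft_job_timeout_worker(), which compares this
+ * timestamp against js_data.soft_job_timeout_ms and either cancels the
+ * timed-out BASE_JD_REQ_SOFT_EVENT_WAIT atom or re-arms the timer.)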
+ */ + katom->start_timestamp = ktime_get(); + + /* Add the atom to the waiting list before the timer is + * (re)started to make sure that it gets processed. + */ + kbasep_add_waiting_soft_job(katom); + + /* Schedule timeout of this atom after a period if it is not active */ + if (!timer_pending(&kctx->soft_job_timeout)) { + int timeout_ms = atomic_read( + &kctx->kbdev->js_data.soft_job_timeout_ms); + mod_timer(&kctx->soft_job_timeout, + jiffies + msecs_to_jiffies(timeout_ms)); + } +} + +static int kbasep_read_soft_event_status( + struct kbase_context *kctx, u64 evt, unsigned char *status) +{ + unsigned char *mapped_evt; + struct kbase_vmap_struct map; + + mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map); + if (!mapped_evt) + return -EFAULT; + + *status = *mapped_evt; + + kbase_vunmap(kctx, &map); + + return 0; +} + +static int kbasep_write_soft_event_status( + struct kbase_context *kctx, u64 evt, unsigned char new_status) +{ + unsigned char *mapped_evt; + struct kbase_vmap_struct map; + + if ((new_status != BASE_JD_SOFT_EVENT_SET) && + (new_status != BASE_JD_SOFT_EVENT_RESET)) + return -EINVAL; + + mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map); + if (!mapped_evt) + return -EFAULT; + + *mapped_evt = new_status; + + kbase_vunmap(kctx, &map); + + return 0; +} + +static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom) +{ + struct kbase_vmap_struct map; + void *user_result; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; +#else + struct timespec ts; +#endif + struct base_dump_cpu_gpu_counters data; + u64 system_time; + u64 cycle_counter; + u64 jc = katom->jc; + struct kbase_context *kctx = katom->kctx; + int pm_active_err; + + memset(&data, 0, sizeof(data)); + + /* Take the PM active reference as late as possible - otherwise, it could + * delay suspend until we process the atom (which may be at the end of a + * long chain of dependencies */ + pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE); + if (pm_active_err) { + struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data; + + /* We're suspended - queue this on the list of suspended jobs + * Use dep_item[1], because dep_item[0] was previously in use + * for 'waiting_soft_jobs'. + */ + mutex_lock(&js_devdata->runpool_mutex); + list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list); + mutex_unlock(&js_devdata->runpool_mutex); + + /* Also adding this to the list of waiting soft job */ + kbasep_add_waiting_soft_job(katom); + + return pm_active_err; + } + + kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time, + &ts); + + kbase_pm_context_idle(kctx->kbdev); + + data.sec = ts.tv_sec; + data.usec = ts.tv_nsec / 1000; + data.system_time = system_time; + data.cycle_counter = cycle_counter; + + /* Assume this atom will be cancelled until we know otherwise */ + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + /* GPU_WR access is checked on the range for returning the result to + * userspace for the following reasons: + * - security, this is currently how imported user bufs are checked. 
+ * - userspace ddk guaranteed to assume region was mapped as GPU_WR */ + user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map); + if (!user_result) + return 0; + + memcpy(user_result, &data, sizeof(data)); + + kbase_vunmap(kctx, &map); + + /* Atom was fine - mark it as done */ + katom->event_code = BASE_JD_EVENT_DONE; + + return 0; +} + +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +/* Called by the explicit fence mechanism when a fence wait has completed */ +void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + + mutex_lock(&kctx->jctx.lock); + kbasep_remove_waiting_soft_job(katom); + kbase_finish_soft_job(katom); + if (jd_done_nolock(katom, NULL)) + kbase_js_sched_all(kctx->kbdev); + mutex_unlock(&kctx->jctx.lock); +} +#endif + +static void kbasep_soft_event_complete_job(struct work_struct *work) +{ + struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, + work); + struct kbase_context *kctx = katom->kctx; + int resched; + + mutex_lock(&kctx->jctx.lock); + resched = jd_done_nolock(katom, NULL); + mutex_unlock(&kctx->jctx.lock); + + if (resched) + kbase_js_sched_all(kctx->kbdev); +} + +void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt) +{ + int cancel_timer = 1; + struct list_head *entry, *tmp; + unsigned long lflags; + + spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags); + list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) { + struct kbase_jd_atom *katom = list_entry( + entry, struct kbase_jd_atom, queue); + + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_EVENT_WAIT: + if (katom->jc == evt) { + list_del(&katom->queue); + + katom->event_code = BASE_JD_EVENT_DONE; + INIT_WORK(&katom->work, + kbasep_soft_event_complete_job); + queue_work(kctx->jctx.job_done_wq, + &katom->work); + } else { + /* There are still other waiting jobs, we cannot + * cancel the timer yet. + */ + cancel_timer = 0; + } + break; +#ifdef CONFIG_MALI_FENCE_DEBUG + case BASE_JD_REQ_SOFT_FENCE_WAIT: + /* Keep the timer running if fence debug is enabled and + * there are waiting fence jobs. + */ + cancel_timer = 0; + break; +#endif + } + } + + if (cancel_timer) + del_timer(&kctx->soft_job_timeout); + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); +} + +#ifdef CONFIG_MALI_FENCE_DEBUG +static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct device *dev = kctx->kbdev->dev; + int i; + + for (i = 0; i < 2; i++) { + struct kbase_jd_atom *dep; + + list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) { + if (dep->status == KBASE_JD_ATOM_STATE_UNUSED || + dep->status == KBASE_JD_ATOM_STATE_COMPLETED) + continue; + + if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) + == BASE_JD_REQ_SOFT_FENCE_TRIGGER) { + /* Found blocked trigger fence. 
*/ + struct kbase_sync_fence_info info; + + if (!kbase_sync_fence_in_info_get(dep, &info)) { + dev_warn(dev, + "\tVictim trigger atom %d fence [%p] %s: %s\n", + kbase_jd_atom_id(kctx, dep), + info.fence, + info.name, + kbase_sync_status_string(info.status)); + } + } + + kbase_fence_debug_check_atom(dep); + } + } +} + +static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct device *dev = katom->kctx->kbdev->dev; + int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms); + unsigned long lflags; + struct kbase_sync_fence_info info; + + spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags); + + if (kbase_sync_fence_in_info_get(katom, &info)) { + /* Fence must have signaled just after timeout. */ + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); + return; + } + + dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n", + kctx->tgid, kctx->id, + kbase_jd_atom_id(kctx, katom), + info.fence, timeout_ms); + dev_warn(dev, "\tGuilty fence [%p] %s: %s\n", + info.fence, info.name, + kbase_sync_status_string(info.status)); + + /* Search for blocked trigger atoms */ + kbase_fence_debug_check_atom(katom); + + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); + + kbase_sync_fence_in_dump(katom); +} + +struct kbase_fence_debug_work { + struct kbase_jd_atom *katom; + struct work_struct work; +}; + +static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work) +{ + struct kbase_fence_debug_work *w = container_of(work, + struct kbase_fence_debug_work, work); + struct kbase_jd_atom *katom = w->katom; + struct kbase_context *kctx = katom->kctx; + + mutex_lock(&kctx->jctx.lock); + kbase_fence_debug_wait_timeout(katom); + mutex_unlock(&kctx->jctx.lock); + + kfree(w); +} + +static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom) +{ + struct kbase_fence_debug_work *work; + struct kbase_context *kctx = katom->kctx; + + /* Enqueue fence debug worker. Use job_done_wq to get + * debug print ordered with job completion. + */ + work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC); + /* Ignore allocation failure. 
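+ * (Editor's note, not part of the original patch: the worker queued below
+ * only reports state via dev_warn() and a fence dump, so if this
+ * GFP_ATOMIC allocation fails the timeout simply goes unreported; the
+ * waiting job itself is not affected.)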
*/ + if (work) { + work->katom = katom; + INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker); + queue_work(kctx->jctx.job_done_wq, &work->work); + } +} +#endif /* CONFIG_MALI_FENCE_DEBUG */ + +void kbasep_soft_job_timeout_worker(struct timer_list *t) +{ + struct kbase_context *kctx = from_timer(kctx, t, soft_job_timeout); + u32 timeout_ms = (u32)atomic_read( + &kctx->kbdev->js_data.soft_job_timeout_ms); + struct timer_list *timer = &kctx->soft_job_timeout; + ktime_t cur_time = ktime_get(); + bool restarting = false; + unsigned long lflags; + struct list_head *entry, *tmp; + + spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags); + list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) { + struct kbase_jd_atom *katom = list_entry(entry, + struct kbase_jd_atom, queue); + s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time, + katom->start_timestamp)); + + if (elapsed_time < (s64)timeout_ms) { + restarting = true; + continue; + } + + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_EVENT_WAIT: + /* Take it out of the list to ensure that it + * will be cancelled in all cases + */ + list_del(&katom->queue); + + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + INIT_WORK(&katom->work, kbasep_soft_event_complete_job); + queue_work(kctx->jctx.job_done_wq, &katom->work); + break; +#ifdef CONFIG_MALI_FENCE_DEBUG + case BASE_JD_REQ_SOFT_FENCE_WAIT: + kbase_fence_debug_timeout(katom); + break; +#endif + } + } + + if (restarting) + mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms)); + spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags); +} + +static int kbasep_soft_event_wait(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + unsigned char status; + + /* The status of this soft-job is stored in jc */ + if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) { + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + return 0; + } + + if (status == BASE_JD_SOFT_EVENT_SET) + return 0; /* Event already set, nothing to do */ + + kbasep_add_waiting_with_timeout(katom); + + return 1; +} + +static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom, + unsigned char new_status) +{ + /* Complete jobs waiting on the same event */ + struct kbase_context *kctx = katom->kctx; + + if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) { + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + return; + } + + if (new_status == BASE_JD_SOFT_EVENT_SET) + kbasep_complete_triggered_soft_events(kctx, katom->jc); +} + +/** + * kbase_soft_event_update() - Update soft event state + * @kctx: Pointer to context + * @event: Event to update + * @new_status: New status value of event + * + * Update the event, and wake up any atoms waiting for the event. + * + * Return: 0 on success, a negative error code on failure. 
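+ * (Editor's note, not part of the original patch: a minimal caller sketch,
+ * with evt_gpu_va standing in for the hypothetical GPU VA of the one-byte
+ * event object:
+ *
+ *   if (kbase_soft_event_update(kctx, evt_gpu_va, BASE_JD_SOFT_EVENT_SET))
+ *           dev_warn(kctx->kbdev->dev, "soft event update failed");
+ *
+ * Setting the event wakes any BASE_JD_REQ_SOFT_EVENT_WAIT atoms whose jc
+ * equals evt_gpu_va.)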
+ */ +int kbase_soft_event_update(struct kbase_context *kctx, + u64 event, + unsigned char new_status) +{ + int err = 0; + + mutex_lock(&kctx->jctx.lock); + + if (kbasep_write_soft_event_status(kctx, event, new_status)) { + err = -ENOENT; + goto out; + } + + if (new_status == BASE_JD_SOFT_EVENT_SET) + kbasep_complete_triggered_soft_events(kctx, event); + +out: + mutex_unlock(&kctx->jctx.lock); + + return err; +} + +static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom) +{ + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + if (jd_done_nolock(katom, NULL)) + kbase_js_sched_all(katom->kctx->kbdev); +} + +struct kbase_debug_copy_buffer { + size_t size; + struct page **pages; + int nr_pages; + size_t offset; + struct kbase_mem_phy_alloc *gpu_alloc; + + struct page **extres_pages; + int nr_extres_pages; +}; + +static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer) +{ + struct page **pages = buffer->extres_pages; + int nr_pages = buffer->nr_extres_pages; + + if (pages) { + int i; + + for (i = 0; i < nr_pages; i++) { + struct page *pg = pages[i]; + + if (pg) + put_page(pg); + } + kfree(pages); + } +} + +static void kbase_debug_copy_finish(struct kbase_jd_atom *katom) +{ + struct kbase_debug_copy_buffer *buffers = + (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc; + unsigned int i; + unsigned int nr = katom->nr_extres; + + if (!buffers) + return; + + kbase_gpu_vm_lock(katom->kctx); + for (i = 0; i < nr; i++) { + int p; + struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc; + + if (!buffers[i].pages) + break; + for (p = 0; p < buffers[i].nr_pages; p++) { + struct page *pg = buffers[i].pages[p]; + + if (pg) + put_page(pg); + } + kfree(buffers[i].pages); + if (gpu_alloc) { + switch (gpu_alloc->type) { + case KBASE_MEM_TYPE_IMPORTED_USER_BUF: + { + free_user_buffer(&buffers[i]); + break; + } + default: + /* Nothing to be done. 
*/ + break; + } + kbase_mem_phy_alloc_put(gpu_alloc); + } + } + kbase_gpu_vm_unlock(katom->kctx); + kfree(buffers); + + katom->jc = 0; +} + +static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom) +{ + struct kbase_debug_copy_buffer *buffers; + struct base_jd_debug_copy_buffer *user_buffers = NULL; + unsigned int i; + unsigned int nr = katom->nr_extres; + int ret = 0; + void __user *user_structs = (void __user *)(uintptr_t)katom->jc; + + if (!user_structs) + return -EINVAL; + + buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL); + if (!buffers) { + ret = -ENOMEM; + katom->jc = 0; + goto out_cleanup; + } + katom->jc = (u64)(uintptr_t)buffers; + + user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL); + + if (!user_buffers) { + ret = -ENOMEM; + goto out_cleanup; + } + + ret = copy_from_user(user_buffers, user_structs, + sizeof(*user_buffers)*nr); + if (ret) { + ret = -EFAULT; + goto out_cleanup; + } + + for (i = 0; i < nr; i++) { + u64 addr = user_buffers[i].address; + u64 page_addr = addr & PAGE_MASK; + u64 end_page_addr = addr + user_buffers[i].size - 1; + u64 last_page_addr = end_page_addr & PAGE_MASK; + int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1; + int pinned_pages; + struct kbase_va_region *reg; + struct base_external_resource user_extres; + + if (!addr) + continue; + + buffers[i].nr_pages = nr_pages; + buffers[i].offset = addr & ~PAGE_MASK; + if (buffers[i].offset >= PAGE_SIZE) { + ret = -EINVAL; + goto out_cleanup; + } + buffers[i].size = user_buffers[i].size; + + buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *), + GFP_KERNEL); + if (!buffers[i].pages) { + ret = -ENOMEM; + goto out_cleanup; + } + + pinned_pages = get_user_pages_fast(page_addr, + nr_pages, + 1, /* Write */ + buffers[i].pages); + if (pinned_pages < 0) { + ret = pinned_pages; + goto out_cleanup; + } + if (pinned_pages != nr_pages) { + ret = -EINVAL; + goto out_cleanup; + } + + user_extres = user_buffers[i].extres; + if (user_extres.ext_resource == 0ULL) { + ret = -EINVAL; + goto out_cleanup; + } + + kbase_gpu_vm_lock(katom->kctx); + reg = kbase_region_tracker_find_region_enclosing_address( + katom->kctx, user_extres.ext_resource & + ~BASE_EXT_RES_ACCESS_EXCLUSIVE); + + if (NULL == reg || NULL == reg->gpu_alloc || + (reg->flags & KBASE_REG_FREE)) { + ret = -EINVAL; + goto out_unlock; + } + + buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc); + buffers[i].nr_extres_pages = reg->nr_pages; + + if (reg->nr_pages*PAGE_SIZE != buffers[i].size) + dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n"); + + switch (reg->gpu_alloc->type) { + case KBASE_MEM_TYPE_IMPORTED_USER_BUF: + { + struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc; + unsigned long nr_pages = + alloc->imported.user_buf.nr_pages; + + if (alloc->imported.user_buf.mm != current->mm) { + ret = -EINVAL; + goto out_unlock; + } + buffers[i].extres_pages = kcalloc(nr_pages, + sizeof(struct page *), GFP_KERNEL); + if (!buffers[i].extres_pages) { + ret = -ENOMEM; + goto out_unlock; + } + + ret = get_user_pages_fast( + alloc->imported.user_buf.address, + nr_pages, 0, + buffers[i].extres_pages); + if (ret != nr_pages) + goto out_unlock; + ret = 0; + break; + } + case KBASE_MEM_TYPE_IMPORTED_UMP: + { + dev_warn(katom->kctx->kbdev->dev, + "UMP is not supported for debug_copy jobs\n"); + ret = -EINVAL; + goto out_unlock; + } + default: + /* Nothing to be done. 
*/ + break; + } + kbase_gpu_vm_unlock(katom->kctx); + } + kfree(user_buffers); + + return ret; + +out_unlock: + kbase_gpu_vm_unlock(katom->kctx); + +out_cleanup: + /* Frees allocated memory for kbase_debug_copy_job struct, including + * members, and sets jc to 0 */ + kbase_debug_copy_finish(katom); + kfree(user_buffers); + + return ret; +} + +static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx, + void *extres_page, struct page **pages, unsigned int nr_pages, + unsigned int *target_page_nr, size_t offset, size_t *to_copy) +{ + void *target_page = kmap(pages[*target_page_nr]); + size_t chunk = PAGE_SIZE-offset; + + lockdep_assert_held(&kctx->reg_lock); + + if (!target_page) { + *target_page_nr += 1; + dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job."); + return; + } + + chunk = min(chunk, *to_copy); + + memcpy(target_page + offset, extres_page, chunk); + *to_copy -= chunk; + + kunmap(pages[*target_page_nr]); + + *target_page_nr += 1; + if (*target_page_nr >= nr_pages) + return; + + target_page = kmap(pages[*target_page_nr]); + if (!target_page) { + *target_page_nr += 1; + dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job."); + return; + } + + KBASE_DEBUG_ASSERT(target_page); + + chunk = min(offset, *to_copy); + memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk); + *to_copy -= chunk; + + kunmap(pages[*target_page_nr]); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc, + unsigned long page_num, struct page **page) +{ + struct sg_table *sgt = gpu_alloc->imported.umm.sgt; + struct sg_page_iter sg_iter; + unsigned long page_index = 0; + + if (WARN_ON(gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)) + return NULL; + + if (!sgt) + return NULL; + + if (WARN_ON(page_num >= gpu_alloc->nents)) + return NULL; + + for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) { + if (page_index == page_num) { + *page = sg_page_iter_page(&sg_iter); + + return kmap(*page); + } + page_index++; + } + + return NULL; +} +#endif + +static int kbase_mem_copy_from_extres(struct kbase_context *kctx, + struct kbase_debug_copy_buffer *buf_data) +{ + unsigned int i; + unsigned int target_page_nr = 0; + struct page **pages = buf_data->pages; + u64 offset = buf_data->offset; + size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE; + size_t to_copy = min(extres_size, buf_data->size); + size_t dma_to_copy; + struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc; + int ret = 0; + + KBASE_DEBUG_ASSERT(pages != NULL); + + kbase_gpu_vm_lock(kctx); + if (!gpu_alloc) { + ret = -EINVAL; + goto out_unlock; + } + + switch (gpu_alloc->type) { + case KBASE_MEM_TYPE_IMPORTED_USER_BUF: + { + for (i = 0; i < buf_data->nr_extres_pages; i++) { + struct page *pg = buf_data->extres_pages[i]; + void *extres_page = kmap(pg); + + if (extres_page) + kbase_mem_copy_from_extres_page(kctx, + extres_page, pages, + buf_data->nr_pages, + &target_page_nr, + offset, &to_copy); + + kunmap(pg); + if (target_page_nr >= buf_data->nr_pages) + break; + } + break; + } + break; +#ifdef CONFIG_DMA_SHARED_BUFFER + case KBASE_MEM_TYPE_IMPORTED_UMM: { + struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf; + + KBASE_DEBUG_ASSERT(dma_buf != NULL); + if (dma_buf->size > buf_data->nr_extres_pages * PAGE_SIZE) + dev_warn(kctx->kbdev->dev, "External resources buffer size mismatch"); + + dma_to_copy = min(dma_buf->size, + (size_t)(buf_data->nr_extres_pages * PAGE_SIZE)); + ret = dma_buf_begin_cpu_access(dma_buf, +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS) + 0, dma_to_copy, +#endif + DMA_FROM_DEVICE); + if (ret) + goto out_unlock; + + for (i = 0; i < dma_to_copy/PAGE_SIZE; i++) { + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct page *pg; + void *extres_page = dma_buf_kmap_page(gpu_alloc, i, &pg); +#else + void *extres_page = dma_buf_kmap(dma_buf, i); +#endif + + if (extres_page) + kbase_mem_copy_from_extres_page(kctx, + extres_page, pages, + buf_data->nr_pages, + &target_page_nr, + offset, &to_copy); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + kunmap(pg); +#else + dma_buf_kunmap(dma_buf, i, extres_page); +#endif + if (target_page_nr >= buf_data->nr_pages) + break; + } + dma_buf_end_cpu_access(dma_buf, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS) + 0, dma_to_copy, +#endif + DMA_FROM_DEVICE); + break; + } +#endif + default: + ret = -EINVAL; + } +out_unlock: + kbase_gpu_vm_unlock(kctx); + return ret; + +} + +static int kbase_debug_copy(struct kbase_jd_atom *katom) +{ + struct kbase_debug_copy_buffer *buffers = + (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc; + unsigned int i; + + for (i = 0; i < katom->nr_extres; i++) { + int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]); + + if (res) + return res; + } + + return 0; +} + +static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom) +{ + __user void *data = (__user void *)(uintptr_t) katom->jc; + struct base_jit_alloc_info *info; + struct kbase_context *kctx = katom->kctx; + int ret; + + /* Fail the job if there is no info structure */ + if (!data) { + ret = -EINVAL; + goto fail; + } + + /* Copy the information for safe access and future storage */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto fail; + } + + if (copy_from_user(info, data, sizeof(*info)) != 0) { + ret = -EINVAL; + goto free_info; + } + + /* If the ID is zero then fail the job */ + if (info->id == 0) { + ret = -EINVAL; + goto free_info; + } + + /* Sanity check that the PA fits within the VA */ + if (info->va_pages < info->commit_pages) { + ret = -EINVAL; + goto free_info; + } + + /* Ensure the GPU address is correctly aligned */ + if ((info->gpu_alloc_addr & 0x7) != 0) { + ret = -EINVAL; + goto free_info; + } + + /* Replace the user pointer with our kernel allocated info structure */ + katom->jc = (u64)(uintptr_t) info; + katom->jit_blocked = false; + + lockdep_assert_held(&kctx->jctx.lock); + list_add_tail(&katom->jit_node, &kctx->jit_atoms_head); + + /* + * Note: + * The provided info->gpu_alloc_addr isn't validated here as + * userland can cache allocations which means that even + * though the region is valid it doesn't represent the + * same thing it used to. + * + * Complete validation of va_pages, commit_pages and extent + * isn't done here as it will be done during the call to + * kbase_mem_alloc. 
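+ * (Editor's note, not part of the original patch: the checks that *are*
+ * made at this point are the copy_from_user() itself, a non-zero info->id,
+ * va_pages >= commit_pages, and an 8-byte aligned gpu_alloc_addr;
+ * everything else is deferred as described above.)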
+ */ + return 0; + +free_info: + kfree(info); +fail: + katom->jc = 0; + return ret; +} + +static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom) +{ + if (WARN_ON(katom->core_req != BASE_JD_REQ_SOFT_JIT_FREE)) + return 0; + + return (u8) katom->jc; +} + +static int kbase_jit_allocate_process(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + struct base_jit_alloc_info *info; + struct kbase_va_region *reg; + struct kbase_vmap_struct mapping; + u64 *ptr, new_addr; + + if (katom->jit_blocked) { + list_del(&katom->queue); + katom->jit_blocked = false; + } + + info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc; + + /* The JIT ID is still in use so fail the allocation */ + if (kctx->jit_alloc[info->id]) { + katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED; + return 0; + } + + /* Create a JIT allocation */ + reg = kbase_jit_allocate(kctx, info); + if (!reg) { + struct kbase_jd_atom *jit_atom; + bool can_block = false; + + lockdep_assert_held(&kctx->jctx.lock); + + jit_atom = list_first_entry(&kctx->jit_atoms_head, + struct kbase_jd_atom, jit_node); + + list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) { + if (jit_atom == katom) + break; + if (jit_atom->core_req == BASE_JD_REQ_SOFT_JIT_FREE) { + u8 free_id = kbase_jit_free_get_id(jit_atom); + + if (free_id && kctx->jit_alloc[free_id]) { + /* A JIT free which is active and + * submitted before this atom + */ + can_block = true; + break; + } + } + } + + if (!can_block) { + /* Mark the allocation so we know it's in use even if + * the allocation itself fails. + */ + kctx->jit_alloc[info->id] = + (struct kbase_va_region *) -1; + + katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED; + return 0; + } + + /* There are pending frees for an active allocation + * so we should wait to see whether they free the memory. + * Add to the beginning of the list to ensure that the atom is + * processed only once in kbase_jit_free_finish + */ + list_add(&katom->queue, &kctx->jit_pending_alloc); + katom->jit_blocked = true; + + return 1; + } + + /* + * Write the address of the JIT allocation to the user provided + * GPU allocation. + */ + ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr), + &mapping); + if (!ptr) { + /* + * Leave the allocation "live" as the JIT free jit will be + * submitted anyway. + */ + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + return 0; + } + + new_addr = reg->start_pfn << PAGE_SHIFT; + *ptr = new_addr; + KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( + katom, info->gpu_alloc_addr, new_addr); + kbase_vunmap(kctx, &mapping); + + katom->event_code = BASE_JD_EVENT_DONE; + + /* + * Bind it to the user provided ID. Do this last so we can check for + * the JIT free racing this JIT alloc job. 
+ */ + kctx->jit_alloc[info->id] = reg; + + return 0; +} + +static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom) +{ + struct base_jit_alloc_info *info; + + lockdep_assert_held(&katom->kctx->jctx.lock); + + /* Remove atom from jit_atoms_head list */ + list_del(&katom->jit_node); + + if (katom->jit_blocked) { + list_del(&katom->queue); + katom->jit_blocked = false; + } + + info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc; + /* Free the info structure */ + kfree(info); +} + +static int kbase_jit_free_prepare(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + + lockdep_assert_held(&kctx->jctx.lock); + list_add_tail(&katom->jit_node, &kctx->jit_atoms_head); + + return 0; +} + +static void kbase_jit_free_process(struct kbase_jd_atom *katom) +{ + struct kbase_context *kctx = katom->kctx; + u8 id = kbase_jit_free_get_id(katom); + + /* + * If the ID is zero or it is not in use yet then fail the job. + */ + if ((id == 0) || (kctx->jit_alloc[id] == NULL)) { + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + return; + } + + /* + * If the ID is valid but the allocation request failed still succeed + * this soft job but don't try and free the allocation. + */ + if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1) + kbase_jit_free(kctx, kctx->jit_alloc[id]); + + kctx->jit_alloc[id] = NULL; +} + +static void kbasep_jit_free_finish_worker(struct work_struct *work) +{ + struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom, + work); + struct kbase_context *kctx = katom->kctx; + int resched; + + mutex_lock(&kctx->jctx.lock); + kbase_finish_soft_job(katom); + resched = jd_done_nolock(katom, NULL); + mutex_unlock(&kctx->jctx.lock); + + if (resched) + kbase_js_sched_all(kctx->kbdev); +} + +static void kbase_jit_free_finish(struct kbase_jd_atom *katom) +{ + struct list_head *i, *tmp; + struct kbase_context *kctx = katom->kctx; + + lockdep_assert_held(&kctx->jctx.lock); + /* Remove this atom from the kctx->jit_atoms_head list */ + list_del(&katom->jit_node); + + list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) { + struct kbase_jd_atom *pending_atom = list_entry(i, + struct kbase_jd_atom, queue); + if (kbase_jit_allocate_process(pending_atom) == 0) { + /* Atom has completed */ + INIT_WORK(&pending_atom->work, + kbasep_jit_free_finish_worker); + queue_work(kctx->jctx.job_done_wq, &pending_atom->work); + } + } +} + +static int kbase_ext_res_prepare(struct kbase_jd_atom *katom) +{ + __user struct base_external_resource_list *user_ext_res; + struct base_external_resource_list *ext_res; + u64 count = 0; + size_t copy_size; + int ret; + + user_ext_res = (__user struct base_external_resource_list *) + (uintptr_t) katom->jc; + + /* Fail the job if there is no info structure */ + if (!user_ext_res) { + ret = -EINVAL; + goto fail; + } + + if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) { + ret = -EINVAL; + goto fail; + } + + /* Is the number of external resources in range? */ + if (!count || count > BASE_EXT_RES_COUNT_MAX) { + ret = -EINVAL; + goto fail; + } + + /* Copy the information for safe access and future storage */ + copy_size = sizeof(*ext_res); + copy_size += sizeof(struct base_external_resource) * (count - 1); + ext_res = kzalloc(copy_size, GFP_KERNEL); + if (!ext_res) { + ret = -ENOMEM; + goto fail; + } + + if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) { + ret = -EINVAL; + goto free_info; + } + + /* + * Overwrite the count with the first value incase it was changed + * after the fact. 
+ */ + ext_res->count = count; + + /* + * Replace the user pointer with our kernel allocated + * ext_res structure. + */ + katom->jc = (u64)(uintptr_t) ext_res; + + return 0; + +free_info: + kfree(ext_res); +fail: + return ret; +} + +static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map) +{ + struct base_external_resource_list *ext_res; + int i; + bool failed = false; + + ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc; + if (!ext_res) + goto failed_jc; + + kbase_gpu_vm_lock(katom->kctx); + + for (i = 0; i < ext_res->count; i++) { + u64 gpu_addr; + + gpu_addr = ext_res->ext_res[i].ext_resource & + ~BASE_EXT_RES_ACCESS_EXCLUSIVE; + if (map) { + if (!kbase_sticky_resource_acquire(katom->kctx, + gpu_addr)) + goto failed_loop; + } else + if (!kbase_sticky_resource_release(katom->kctx, NULL, + gpu_addr)) + failed = true; + } + + /* + * In the case of unmap we continue unmapping other resources in the + * case of failure but will always report failure if _any_ unmap + * request fails. + */ + if (failed) + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + else + katom->event_code = BASE_JD_EVENT_DONE; + + kbase_gpu_vm_unlock(katom->kctx); + + return; + +failed_loop: + while (i > 0) { + u64 const gpu_addr = ext_res->ext_res[i - 1].ext_resource & + ~BASE_EXT_RES_ACCESS_EXCLUSIVE; + + kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr); + + --i; + } + + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + kbase_gpu_vm_unlock(katom->kctx); + +failed_jc: + return; +} + +static void kbase_ext_res_finish(struct kbase_jd_atom *katom) +{ + struct base_external_resource_list *ext_res; + + ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc; + /* Free the info structure */ + kfree(ext_res); +} + +int kbase_process_soft_job(struct kbase_jd_atom *katom) +{ + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: + return kbase_dump_cpu_gpu_time(katom); + +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + case BASE_JD_REQ_SOFT_FENCE_TRIGGER: + katom->event_code = kbase_sync_fence_out_trigger(katom, + katom->event_code == BASE_JD_EVENT_DONE ? 
+ 0 : -EFAULT); + break; + case BASE_JD_REQ_SOFT_FENCE_WAIT: + { + int ret = kbase_sync_fence_in_wait(katom); + + if (ret == 1) { +#ifdef CONFIG_MALI_FENCE_DEBUG + kbasep_add_waiting_with_timeout(katom); +#else + kbasep_add_waiting_soft_job(katom); +#endif + } + return ret; + } +#endif + + case BASE_JD_REQ_SOFT_REPLAY: + return kbase_replay_process(katom); + case BASE_JD_REQ_SOFT_EVENT_WAIT: + return kbasep_soft_event_wait(katom); + case BASE_JD_REQ_SOFT_EVENT_SET: + kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET); + break; + case BASE_JD_REQ_SOFT_EVENT_RESET: + kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET); + break; + case BASE_JD_REQ_SOFT_DEBUG_COPY: + { + int res = kbase_debug_copy(katom); + + if (res) + katom->event_code = BASE_JD_EVENT_JOB_INVALID; + break; + } + case BASE_JD_REQ_SOFT_JIT_ALLOC: + return kbase_jit_allocate_process(katom); + case BASE_JD_REQ_SOFT_JIT_FREE: + kbase_jit_free_process(katom); + break; + case BASE_JD_REQ_SOFT_EXT_RES_MAP: + kbase_ext_res_process(katom, true); + break; + case BASE_JD_REQ_SOFT_EXT_RES_UNMAP: + kbase_ext_res_process(katom, false); + break; + } + + /* Atom is complete */ + return 0; +} + +void kbase_cancel_soft_job(struct kbase_jd_atom *katom) +{ + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + case BASE_JD_REQ_SOFT_FENCE_WAIT: + kbase_sync_fence_in_cancel_wait(katom); + break; +#endif + case BASE_JD_REQ_SOFT_EVENT_WAIT: + kbasep_soft_event_cancel_job(katom); + break; + default: + /* This soft-job doesn't support cancellation! */ + KBASE_DEBUG_ASSERT(0); + } +} + +int kbase_prepare_soft_job(struct kbase_jd_atom *katom) +{ + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: + { + if (!IS_ALIGNED(katom->jc, cache_line_size())) + return -EINVAL; + } + break; +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + case BASE_JD_REQ_SOFT_FENCE_TRIGGER: + { + struct base_fence fence; + int fd; + + if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence))) + return -EINVAL; + + fd = kbase_sync_fence_out_create(katom, + fence.basep.stream_fd); + if (fd < 0) + return -EINVAL; + + fence.basep.fd = fd; + if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) { + kbase_sync_fence_out_remove(katom); + kbase_sync_fence_close_fd(fd); + fence.basep.fd = -EINVAL; + return -EINVAL; + } + } + break; + case BASE_JD_REQ_SOFT_FENCE_WAIT: + { + struct base_fence fence; + int ret; + + if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence))) + return -EINVAL; + + /* Get a reference to the fence object */ + ret = kbase_sync_fence_in_from_fd(katom, + fence.basep.fd); + if (ret < 0) + return ret; + +#ifdef CONFIG_MALI_DMA_FENCE + /* + * Set KCTX_NO_IMPLICIT_FENCE in the context the first + * time a soft fence wait job is observed. This will + * prevent the implicit dma-buf fence to conflict with + * the Android native sync fences. 
+ */ + if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC)) + kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC); +#endif /* CONFIG_MALI_DMA_FENCE */ + } + break; +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + case BASE_JD_REQ_SOFT_JIT_ALLOC: + return kbase_jit_allocate_prepare(katom); + case BASE_JD_REQ_SOFT_REPLAY: + break; + case BASE_JD_REQ_SOFT_JIT_FREE: + return kbase_jit_free_prepare(katom); + case BASE_JD_REQ_SOFT_EVENT_WAIT: + case BASE_JD_REQ_SOFT_EVENT_SET: + case BASE_JD_REQ_SOFT_EVENT_RESET: + if (katom->jc == 0) + return -EINVAL; + break; + case BASE_JD_REQ_SOFT_DEBUG_COPY: + return kbase_debug_copy_prepare(katom); + case BASE_JD_REQ_SOFT_EXT_RES_MAP: + return kbase_ext_res_prepare(katom); + case BASE_JD_REQ_SOFT_EXT_RES_UNMAP: + return kbase_ext_res_prepare(katom); + default: + /* Unsupported soft-job */ + return -EINVAL; + } + return 0; +} + +void kbase_finish_soft_job(struct kbase_jd_atom *katom) +{ + switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { + case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: + /* Nothing to do */ + break; +#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) + case BASE_JD_REQ_SOFT_FENCE_TRIGGER: + /* If fence has not yet been signaled, do it now */ + kbase_sync_fence_out_trigger(katom, katom->event_code == + BASE_JD_EVENT_DONE ? 0 : -EFAULT); + break; + case BASE_JD_REQ_SOFT_FENCE_WAIT: + /* Release katom's reference to fence object */ + kbase_sync_fence_in_remove(katom); + break; +#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ + case BASE_JD_REQ_SOFT_DEBUG_COPY: + kbase_debug_copy_finish(katom); + break; + case BASE_JD_REQ_SOFT_JIT_ALLOC: + kbase_jit_allocate_finish(katom); + break; + case BASE_JD_REQ_SOFT_EXT_RES_MAP: + kbase_ext_res_finish(katom); + break; + case BASE_JD_REQ_SOFT_EXT_RES_UNMAP: + kbase_ext_res_finish(katom); + break; + case BASE_JD_REQ_SOFT_JIT_FREE: + kbase_jit_free_finish(katom); + break; + } +} + +void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev) +{ + LIST_HEAD(local_suspended_soft_jobs); + struct kbase_jd_atom *tmp_iter; + struct kbase_jd_atom *katom_iter; + struct kbasep_js_device_data *js_devdata; + bool resched = false; + + KBASE_DEBUG_ASSERT(kbdev); + + js_devdata = &kbdev->js_data; + + /* Move out the entire list */ + mutex_lock(&js_devdata->runpool_mutex); + list_splice_init(&js_devdata->suspended_soft_jobs_list, + &local_suspended_soft_jobs); + mutex_unlock(&js_devdata->runpool_mutex); + + /* + * Each atom must be detached from the list and ran separately - + * it could be re-added to the old list, but this is unlikely + */ + list_for_each_entry_safe(katom_iter, tmp_iter, + &local_suspended_soft_jobs, dep_item[1]) { + struct kbase_context *kctx = katom_iter->kctx; + + mutex_lock(&kctx->jctx.lock); + + /* Remove from the global list */ + list_del(&katom_iter->dep_item[1]); + /* Remove from the context's list of waiting soft jobs */ + kbasep_remove_waiting_soft_job(katom_iter); + + if (kbase_process_soft_job(katom_iter) == 0) { + kbase_finish_soft_job(katom_iter); + resched |= jd_done_nolock(katom_iter, NULL); + } else { + KBASE_DEBUG_ASSERT((katom_iter->core_req & + BASE_JD_REQ_SOFT_JOB_TYPE) + != BASE_JD_REQ_SOFT_REPLAY); + } + + mutex_unlock(&kctx->jctx.lock); + } + + if (resched) + kbase_js_sched_all(kbdev); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.c b/drivers/gpu/arm/midgard/mali_kbase_strings.c new file mode 100644 index 00000000000000..c98762cec24412 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_strings.c @@ -0,0 +1,23 @@ + /* + * + * (C) COPYRIGHT 2010-2016 ARM 
Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + +#include "mali_kbase_strings.h" + +#define KBASE_DRV_NAME "mali" +#define KBASE_TIMELINE_NAME KBASE_DRV_NAME ".timeline" + +const char kbase_drv_name[] = KBASE_DRV_NAME; +const char kbase_timeline_name[] = KBASE_TIMELINE_NAME; diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.h b/drivers/gpu/arm/midgard/mali_kbase_strings.h new file mode 100644 index 00000000000000..41b8fdbec6a436 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_strings.h @@ -0,0 +1,19 @@ +/* + * + * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +extern const char kbase_drv_name[]; +extern const char kbase_timeline_name[]; diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync.h b/drivers/gpu/arm/midgard/mali_kbase_sync.h new file mode 100644 index 00000000000000..33b580595563b9 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_sync.h @@ -0,0 +1,203 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @file mali_kbase_sync.h + * + * This file contains our internal "API" for explicit fences. + * It hides the implementation details of the actual explicit fence mechanism + * used (Android fences or sync file with DMA fences). + */ + +#ifndef MALI_KBASE_SYNC_H +#define MALI_KBASE_SYNC_H + +#include +#ifdef CONFIG_SYNC +#include +#endif +#ifdef CONFIG_SYNC_FILE +#include "mali_kbase_fence_defs.h" +#include +#endif + +#include "mali_kbase.h" + +/** + * struct kbase_sync_fence_info - Information about a fence + * @fence: Pointer to fence (type is void*, as underlying struct can differ) + * @name: The name given to this fence when it was created + * @status: < 0 means error, 0 means active, 1 means signaled + * + * Use kbase_sync_fence_in_info_get() or kbase_sync_fence_out_info_get() + * to get the information. + */ +struct kbase_sync_fence_info { + void *fence; + char name[32]; + int status; +}; + +/** + * kbase_sync_fence_stream_create() - Create a stream object + * @name: Name of stream (only used to ease debugging/visualization) + * @out_fd: A file descriptor representing the created stream object + * + * Can map down to a timeline implementation in some implementations.
+ * Exposed as a file descriptor. + * Life-time controlled via the file descriptor: + * - dup to add a ref + * - close to remove a ref + * + * return: 0 on success, < 0 on error + */ +int kbase_sync_fence_stream_create(const char *name, int *const out_fd); + +/** + * kbase_sync_fence_out_create() - Create an explicit output fence to specified atom + * @katom: Atom to assign the new explicit fence to + * @stream_fd: File descriptor for stream object to create fence on + * + * return: Valid file descriptor to fence or < 0 on error + */ +int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd); + +/** + * kbase_sync_fence_in_from_fd() - Assign an existing fence to specified atom + * @katom: Atom to assign the existing explicit fence to + * @fd: File descriptor to an existing fence + * + * Assigns an explicit input fence to atom. + * This can later be waited for by calling @kbase_sync_fence_in_wait + * + * return: 0 on success, < 0 on error + */ +int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd); + +/** + * kbase_sync_fence_validate() - Validate a fd to be a valid fence + * @fd: File descriptor to check + * + * This function is only usable to catch unintentional user errors early, + * it does not stop malicious code changing the fd after this function returns. + * + * return: 0 if fd is for a valid fence, < 0 if invalid + */ +int kbase_sync_fence_validate(int fd); + +/** + * kbase_sync_fence_out_trigger() - Signal explicit output fence attached on katom + * @katom: Atom with an explicit fence to signal + * @result: < 0 means signal with error, >= 0 indicates success + * + * Signal output fence attached on katom and remove the fence from the atom. + * + * return: The "next" event code for atom, typically JOB_CANCELLED or EVENT_DONE + */ +enum base_jd_event_code +kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result); + +/** + * kbase_sync_fence_in_wait() - Wait for explicit input fence to be signaled + * @katom: Atom with explicit fence to wait for + * + * If the fence is already signaled, then 0 is returned, and the caller must + * continue processing of the katom. + * + * If the fence isn't already signaled, then this kbase_sync framework will + * take responsibility to continue the processing once the fence is signaled. + * + * return: 0 if already signaled, otherwise 1 + */ +int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom); + +/** + * kbase_sync_fence_in_cancel_wait() - Cancel explicit input fence waits + * @katom: Atom to cancel wait for + * + * This function is fully responsible for continuing processing of this atom + * (remove_waiting_soft_job + finish_soft_job + jd_done + js_sched_all) + */ +void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom); + +/** + * kbase_sync_fence_in_remove() - Remove the input fence from the katom + * @katom: Atom to remove explicit input fence for + * + * This will also release the corresponding reference. + */ +void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom); + +/** + * kbase_sync_fence_out_remove() - Remove the output fence from the katom + * @katom: Atom to remove explicit output fence for + * + * This will also release the corresponding reference.
+ */ +void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom); + +/** + * kbase_sync_fence_close_fd() - Close a file descriptor representing a fence + * @fd: File descriptor to close + */ +static inline void kbase_sync_fence_close_fd(int fd) +{ + ksys_close(fd); +} + +/** + * kbase_sync_fence_in_info_get() - Retrieves information about input fence + * @katom: Atom to get fence information from + * @info: Struct to be filled with fence information + * + * return: 0 on success, < 0 on error + */ +int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info); + +/** + * kbase_sync_fence_out_info_get() - Retrieves information about output fence + * @katom: Atom to get fence information from + * @info: Struct to be filled with fence information + * + * return: 0 on success, < 0 on error + */ +int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info); + +/** + * kbase_sync_status_string() - Get string matching @status + * @status: Value of fence status. + * + * return: Pointer to string describing @status. + */ +const char *kbase_sync_status_string(int status); + +/* + * Internal worker used to continue processing of atom. + */ +void kbase_sync_fence_wait_worker(struct work_struct *data); + +#ifdef CONFIG_MALI_FENCE_DEBUG +/** + * kbase_sync_fence_in_dump() Trigger a debug dump of atoms input fence state + * @katom: Atom to trigger fence debug dump for + */ +void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom); +#endif + +#endif /* MALI_KBASE_SYNC_H */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_android.c b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c new file mode 100644 index 00000000000000..d7349dcae69a07 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c @@ -0,0 +1,537 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Code for supporting explicit Android fences (CONFIG_SYNC) + * Known to be good for kernels 4.5 and earlier. + * Replaced with CONFIG_SYNC_FILE for 4.9 and later kernels + * (see mali_kbase_sync_file.c) + */ + +#include +#include +#include +#include +#include +#include +#include +#include "sync.h" +#include +#include + +struct mali_sync_timeline { + struct sync_timeline timeline; + atomic_t counter; + atomic_t signaled; +}; + +struct mali_sync_pt { + struct sync_pt pt; + int order; + int result; +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) +/* For backwards compatibility with kernels before 3.17. After 3.17 + * sync_pt_parent is included in the kernel. 
*/ +static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +{ + return pt->parent; +} +#endif + +static struct mali_sync_timeline *to_mali_sync_timeline( + struct sync_timeline *timeline) +{ + return container_of(timeline, struct mali_sync_timeline, timeline); +} + +static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt) +{ + return container_of(pt, struct mali_sync_pt, pt); +} + +static struct sync_pt *timeline_dup(struct sync_pt *pt) +{ + struct mali_sync_pt *mpt = to_mali_sync_pt(pt); + struct mali_sync_pt *new_mpt; + struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt), + sizeof(struct mali_sync_pt)); + + if (!new_pt) + return NULL; + + new_mpt = to_mali_sync_pt(new_pt); + new_mpt->order = mpt->order; + new_mpt->result = mpt->result; + + return new_pt; +} + +static int timeline_has_signaled(struct sync_pt *pt) +{ + struct mali_sync_pt *mpt = to_mali_sync_pt(pt); + struct mali_sync_timeline *mtl = to_mali_sync_timeline( + sync_pt_parent(pt)); + int result = mpt->result; + + int diff = atomic_read(&mtl->signaled) - mpt->order; + + if (diff >= 0) + return (result < 0) ? result : 1; + + return 0; +} + +static int timeline_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt); + struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt); + + int diff = ma->order - mb->order; + + if (diff == 0) + return 0; + + return (diff < 0) ? -1 : 1; +} + +static void timeline_value_str(struct sync_timeline *timeline, char *str, + int size) +{ + struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline); + + snprintf(str, size, "%d", atomic_read(&mtl->signaled)); +} + +static void pt_value_str(struct sync_pt *pt, char *str, int size) +{ + struct mali_sync_pt *mpt = to_mali_sync_pt(pt); + + snprintf(str, size, "%d(%d)", mpt->order, mpt->result); +} + +static struct sync_timeline_ops mali_timeline_ops = { + .driver_name = "Mali", + .dup = timeline_dup, + .has_signaled = timeline_has_signaled, + .compare = timeline_compare, + .timeline_value_str = timeline_value_str, + .pt_value_str = pt_value_str, +}; + +/* Allocates a timeline for Mali + * + * One timeline should be allocated per API context. + */ +static struct sync_timeline *mali_sync_timeline_alloc(const char *name) +{ + struct sync_timeline *tl; + struct mali_sync_timeline *mtl; + + tl = sync_timeline_create(&mali_timeline_ops, + sizeof(struct mali_sync_timeline), name); + if (!tl) + return NULL; + + /* Set the counter in our private struct */ + mtl = to_mali_sync_timeline(tl); + atomic_set(&mtl->counter, 0); + atomic_set(&mtl->signaled, 0); + + return tl; +} + +static int kbase_stream_close(struct inode *inode, struct file *file) +{ + struct sync_timeline *tl; + + tl = (struct sync_timeline *)file->private_data; + sync_timeline_destroy(tl); + return 0; +} + +static const struct file_operations stream_fops = { + .owner = THIS_MODULE, + .release = kbase_stream_close, +}; + +int kbase_sync_fence_stream_create(const char *name, int *const out_fd) +{ + struct sync_timeline *tl; + + if (!out_fd) + return -EINVAL; + + tl = mali_sync_timeline_alloc(name); + if (!tl) + return -EINVAL; + + *out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY|O_CLOEXEC); + + if (*out_fd < 0) { + sync_timeline_destroy(tl); + return -EINVAL; + } + + return 0; +} + +/* Allocates a sync point within the timeline. 
+ * + * The timeline must be the one allocated by kbase_sync_timeline_alloc + * + * Sync points must be triggered in *exactly* the same order as they are + * allocated. + */ +static struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent) +{ + struct sync_pt *pt = sync_pt_create(parent, + sizeof(struct mali_sync_pt)); + struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent); + struct mali_sync_pt *mpt; + + if (!pt) + return NULL; + + mpt = to_mali_sync_pt(pt); + mpt->order = atomic_inc_return(&mtl->counter); + mpt->result = 0; + + return pt; +} + +int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int tl_fd) +{ + struct sync_timeline *tl; + struct sync_pt *pt; + struct sync_fence *fence; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) + struct files_struct *files; + struct fdtable *fdt; +#endif + int fd; + struct file *tl_file; + + tl_file = fget(tl_fd); + if (tl_file == NULL) + return -EBADF; + + if (tl_file->f_op != &stream_fops) { + fd = -EBADF; + goto out; + } + + tl = tl_file->private_data; + + pt = kbase_sync_pt_alloc(tl); + if (!pt) { + fd = -EFAULT; + goto out; + } + + fence = sync_fence_create("mali_fence", pt); + if (!fence) { + sync_pt_free(pt); + fd = -EFAULT; + goto out; + } + + /* from here the fence owns the sync_pt */ + + /* create a fd representing the fence */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) + fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (fd < 0) { + sync_fence_put(fence); + goto out; + } +#else + fd = get_unused_fd(); + if (fd < 0) { + sync_fence_put(fence); + goto out; + } + + files = current->files; + spin_lock(&files->file_lock); + fdt = files_fdtable(files); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) + __set_close_on_exec(fd, fdt); +#else + FD_SET(fd, fdt->close_on_exec); +#endif + spin_unlock(&files->file_lock); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */ + + /* bind fence to the new fd */ + sync_fence_install(fence, fd); + + katom->fence = sync_fence_fdget(fd); + if (katom->fence == NULL) { + /* The only way the fence can be NULL is if userspace closed it + * for us, so we don't need to clear it up */ + fd = -EINVAL; + goto out; + } + +out: + fput(tl_file); + + return fd; +} + +int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd) +{ + katom->fence = sync_fence_fdget(fd); + return katom->fence ? 0 : -ENOENT; +} + +int kbase_sync_fence_validate(int fd) +{ + struct sync_fence *fence; + + fence = sync_fence_fdget(fd); + if (!fence) + return -EINVAL; + + sync_fence_put(fence); + return 0; +} + +/* Returns true if the specified timeline is allocated by Mali */ +static int kbase_sync_timeline_is_ours(struct sync_timeline *timeline) +{ + return timeline->ops == &mali_timeline_ops; +} + +/* Signals a particular sync point + * + * Sync points must be triggered in *exactly* the same order as they are + * allocated. + * + * If they are signaled in the wrong order then a message will be printed in + * debug builds and otherwise attempts to signal order sync_pts will be ignored. + * + * result can be negative to indicate error, any other value is interpreted as + * success. + */ +static void kbase_sync_signal_pt(struct sync_pt *pt, int result) +{ + struct mali_sync_pt *mpt = to_mali_sync_pt(pt); + struct mali_sync_timeline *mtl = to_mali_sync_timeline( + sync_pt_parent(pt)); + int signaled; + int diff; + + mpt->result = result; + + do { + signaled = atomic_read(&mtl->signaled); + + diff = signaled - mpt->order; + + if (diff > 0) { + /* The timeline is already at or ahead of this point. 
+ * This should not happen unless userspace has been + * signaling fences out of order, so warn but don't + * violate the sync_pt API. + * The warning is only in debug builds to prevent + * a malicious user being able to spam dmesg. + */ +#ifdef CONFIG_MALI_DEBUG + pr_err("Fences were triggered in a different order to allocation!"); +#endif /* CONFIG_MALI_DEBUG */ + return; + } + } while (atomic_cmpxchg(&mtl->signaled, + signaled, mpt->order) != signaled); +} + +enum base_jd_event_code +kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result) +{ + struct sync_pt *pt; + struct sync_timeline *timeline; + + if (!katom->fence) + return BASE_JD_EVENT_JOB_CANCELLED; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (!list_is_singular(&katom->fence->pt_list_head)) { +#else + if (katom->fence->num_fences != 1) { +#endif + /* Not exactly one item in the list - so it didn't (directly) + * come from us */ + return BASE_JD_EVENT_JOB_CANCELLED; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + pt = list_first_entry(&katom->fence->pt_list_head, + struct sync_pt, pt_list); +#else + pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base); +#endif + timeline = sync_pt_parent(pt); + + if (!kbase_sync_timeline_is_ours(timeline)) { + /* Fence has a sync_pt which isn't ours! */ + return BASE_JD_EVENT_JOB_CANCELLED; + } + + kbase_sync_signal_pt(pt, result); + + sync_timeline_signal(timeline); + + kbase_sync_fence_out_remove(katom); + + return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE; +} + +static inline int kbase_fence_get_status(struct sync_fence *fence) +{ + if (!fence) + return -ENOENT; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + return fence->status; +#else + return atomic_read(&fence->status); +#endif +} + +static void kbase_fence_wait_callback(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + struct kbase_jd_atom *katom = container_of(waiter, + struct kbase_jd_atom, sync_waiter); + struct kbase_context *kctx = katom->kctx; + + /* Propagate the fence status to the atom. + * If negative then cancel this atom and its dependencies. + */ + if (kbase_fence_get_status(fence) < 0) + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + /* To prevent a potential deadlock we schedule the work onto the + * job_done_wq workqueue + * + * The issue is that we may signal the timeline while holding + * kctx->jctx.lock and the callbacks are run synchronously from + * sync_timeline_signal. So we simply defer the work. 
+ */ + + INIT_WORK(&katom->work, kbase_sync_fence_wait_worker); + queue_work(kctx->jctx.job_done_wq, &katom->work); +} + +int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom) +{ + int ret; + + sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback); + + ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter); + + if (ret == 1) { + /* Already signaled */ + return 0; + } + + if (ret < 0) { + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + /* We should cause the dependent jobs in the bag to be failed, + * to do this we schedule the work queue to complete this job */ + INIT_WORK(&katom->work, kbase_sync_fence_wait_worker); + queue_work(katom->kctx->jctx.job_done_wq, &katom->work); + } + + return 1; +} + +void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom) +{ + if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) { + /* The wait wasn't cancelled - leave the cleanup for + * kbase_fence_wait_callback */ + return; + } + + /* Wait was cancelled - zap the atoms */ + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + kbasep_remove_waiting_soft_job(katom); + kbase_finish_soft_job(katom); + + if (jd_done_nolock(katom, NULL)) + kbase_js_sched_all(katom->kctx->kbdev); +} + +void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom) +{ + if (katom->fence) { + sync_fence_put(katom->fence); + katom->fence = NULL; + } +} + +void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom) +{ + if (katom->fence) { + sync_fence_put(katom->fence); + katom->fence = NULL; + } +} + +int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info) +{ + if (!katom->fence) + return -ENOENT; + + info->fence = katom->fence; + info->status = kbase_fence_get_status(katom->fence); + strlcpy(info->name, katom->fence->name, sizeof(info->name)); + + return 0; +} + +int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info) +{ + if (!katom->fence) + return -ENOENT; + + info->fence = katom->fence; + info->status = kbase_fence_get_status(katom->fence); + strlcpy(info->name, katom->fence->name, sizeof(info->name)); + + return 0; +} + +#ifdef CONFIG_MALI_FENCE_DEBUG +void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom) +{ + /* Dump out the full state of all the Android sync fences. + * The function sync_dump() isn't exported to modules, so force + * sync_fence_wait() to time out to trigger sync_dump(). + */ + if (katom->fence) + sync_fence_wait(katom->fence, 1); +} +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_common.c b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c new file mode 100644 index 00000000000000..457def29668478 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c @@ -0,0 +1,43 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* + * @file mali_kbase_sync_common.c + * + * Common code for our explicit fence functionality + */ + +#include +#include "mali_kbase.h" + +void kbase_sync_fence_wait_worker(struct work_struct *data) +{ + struct kbase_jd_atom *katom; + + katom = container_of(data, struct kbase_jd_atom, work); + kbase_soft_event_wait_callback(katom); +} + +const char *kbase_sync_status_string(int status) +{ + if (status == 0) + return "signaled"; + else if (status > 0) + return "active"; + else + return "error"; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_file.c b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c new file mode 100644 index 00000000000000..ef5b7ce478b4bb --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c @@ -0,0 +1,348 @@ +/* + * + * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* + * Code for supporting explicit Linux fences (CONFIG_SYNC_FILE) + * Introduced in kernel 4.9. + * Android explicit fences (CONFIG_SYNC) can be used for older kernels + * (see mali_kbase_sync_android.c) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mali_kbase_fence_defs.h" +#include "mali_kbase_sync.h" +#include "mali_kbase_fence.h" +#include "mali_kbase.h" + +static const struct file_operations stream_fops = { + .owner = THIS_MODULE +}; + +int kbase_sync_fence_stream_create(const char *name, int *const out_fd) +{ + if (!out_fd) + return -EINVAL; + + *out_fd = anon_inode_getfd(name, &stream_fops, NULL, + O_RDONLY | O_CLOEXEC); + if (*out_fd < 0) + return -EINVAL; + + return 0; +} + +int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + struct sync_file *sync_file; + int fd; + + fence = kbase_fence_out_new(katom); + if (!fence) + return -ENOMEM; + + /* Take an extra reference to the fence on behalf of the katom. 
+ * This is needed because sync_file_create() will take ownership of + * one of these refs */ + dma_fence_get(fence); + + /* create a sync_file fd representing the fence */ + sync_file = sync_file_create(fence); + if (!sync_file) { + dma_fence_put(fence); + kbase_fence_out_remove(katom); + return -ENOMEM; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + fput(sync_file->file); + kbase_fence_out_remove(katom); + return fd; + } + + fd_install(fd, sync_file->file); + + return fd; +} + +int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence = sync_file_get_fence(fd); +#else + struct dma_fence *fence = sync_file_get_fence(fd); +#endif + + if (!fence) + return -ENOENT; + + kbase_fence_fence_in_set(katom, fence); + + return 0; +} + +int kbase_sync_fence_validate(int fd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence = sync_file_get_fence(fd); +#else + struct dma_fence *fence = sync_file_get_fence(fd); +#endif + + if (!fence) + return -EINVAL; + + dma_fence_put(fence); + + return 0; /* valid */ +} + +enum base_jd_event_code +kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result) +{ + int res; + + if (!kbase_fence_out_is_ours(katom)) { + /* Not our fence */ + return BASE_JD_EVENT_JOB_CANCELLED; + } + + res = kbase_fence_out_signal(katom, result); + if (unlikely(res < 0)) { + dev_warn(katom->kctx->kbdev->dev, + "fence_signal() failed with %d\n", res); + } + + kbase_sync_fence_out_remove(katom); + + return (result != 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +static void kbase_fence_wait_callback(struct fence *fence, + struct fence_cb *cb) +#else +static void kbase_fence_wait_callback(struct dma_fence *fence, + struct dma_fence_cb *cb) +#endif +{ + struct kbase_fence_cb *kcb = container_of(cb, + struct kbase_fence_cb, + fence_cb); + struct kbase_jd_atom *katom = kcb->katom; + struct kbase_context *kctx = katom->kctx; + + /* Cancel atom if fence is erroneous */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + if (dma_fence_is_signaled(kcb->fence) && kcb->fence->error) +#else + if (dma_fence_is_signaled(kcb->fence) && kcb->fence->status < 0) +#endif + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + if (kbase_fence_dep_count_dec_and_test(katom)) { + /* We take responsibility of handling this */ + kbase_fence_dep_count_set(katom, -1); + + /* To prevent a potential deadlock we schedule the work onto the + * job_done_wq workqueue + * + * The issue is that we may signal the timeline while holding + * kctx->jctx.lock and the callbacks are run synchronously from + * sync_timeline_signal. So we simply defer the work. + */ + INIT_WORK(&katom->work, kbase_sync_fence_wait_worker); + queue_work(kctx->jctx.job_done_wq, &katom->work); + } +} + +int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom) +{ + int err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + + fence = kbase_fence_in_get(katom); + if (!fence) + return 0; /* no input fence to wait for, good to go! 
*/ + + kbase_fence_dep_count_set(katom, 1); + + err = kbase_fence_add_callback(katom, fence, kbase_fence_wait_callback); + + kbase_fence_put(fence); + + if (likely(!err)) { + /* Test if the callbacks are already triggered */ + if (kbase_fence_dep_count_dec_and_test(katom)) { + kbase_fence_free_callbacks(katom); + kbase_fence_dep_count_set(katom, -1); + return 0; /* Already signaled, good to go right now */ + } + + /* Callback installed, so we just need to wait for it... */ + } else { + /* Failure */ + kbase_fence_free_callbacks(katom); + kbase_fence_dep_count_set(katom, -1); + + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + /* We should cause the dependent jobs in the bag to be failed, + * to do this we schedule the work queue to complete this job */ + + INIT_WORK(&katom->work, kbase_sync_fence_wait_worker); + queue_work(katom->kctx->jctx.job_done_wq, &katom->work); + } + + return 1; /* completion to be done later by callback/worker */ +} + +void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom) +{ + if (!kbase_fence_free_callbacks(katom)) { + /* The wait wasn't cancelled - + * leave the cleanup for kbase_fence_wait_callback */ + return; + } + + /* Take responsibility of completion */ + kbase_fence_dep_count_set(katom, -1); + + /* Wait was cancelled - zap the atoms */ + katom->event_code = BASE_JD_EVENT_JOB_CANCELLED; + + kbasep_remove_waiting_soft_job(katom); + kbase_finish_soft_job(katom); + + if (jd_done_nolock(katom, NULL)) + kbase_js_sched_all(katom->kctx->kbdev); +} + +void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom) +{ + kbase_fence_out_remove(katom); +} + +void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom) +{ + kbase_fence_free_callbacks(katom); + kbase_fence_in_remove(katom); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +static void kbase_sync_fence_info_get(struct fence *fence, + struct kbase_sync_fence_info *info) +#else +static void kbase_sync_fence_info_get(struct dma_fence *fence, + struct kbase_sync_fence_info *info) +#endif +{ + info->fence = fence; + + /* translate into CONFIG_SYNC status: + * < 0 : error + * 0 : active + * 1 : signaled + */ + if (dma_fence_is_signaled(fence)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + int status = fence->error; +#else + int status = fence->status; +#endif + if (status < 0) + info->status = status; /* signaled with error */ + else + info->status = 1; /* signaled with success */ + } else { + info->status = 0; /* still active (unsignaled) */ + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) + scnprintf(info->name, sizeof(info->name), "%u#%u", + fence->context, fence->seqno); +#else + scnprintf(info->name, sizeof(info->name), "%llu#%u", + fence->context, fence->seqno); +#endif +} + +int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + + fence = kbase_fence_in_get(katom); + if (!fence) + return -ENOENT; + + kbase_sync_fence_info_get(fence, info); + + kbase_fence_put(fence); + + return 0; +} + +int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom, + struct kbase_sync_fence_info *info) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) + struct fence *fence; +#else + struct dma_fence *fence; +#endif + + fence = kbase_fence_out_get(katom); + if (!fence) + return -ENOENT; + + kbase_sync_fence_info_get(fence, info); + + kbase_fence_put(fence); + + return 0; +} + + +#ifdef 
CONFIG_MALI_FENCE_DEBUG +void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom) +{ + /* Not implemented */ +} +#endif diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.c b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c new file mode 100644 index 00000000000000..6f658ec6b9de0a --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c @@ -0,0 +1,2575 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/*****************************************************************************/ + +/* The version of swtrace protocol used in timeline stream. */ +#define SWTRACE_VERSION 3 + +/* The maximum expected length of string in tracepoint descriptor. */ +#define STRLEN_MAX 64 /* bytes */ + +/* The number of nanoseconds in a second. */ +#define NSECS_IN_SEC 1000000000ull /* ns */ + +/* The period of autoflush checker execution in milliseconds. */ +#define AUTOFLUSH_INTERVAL 1000 /* ms */ + +/* The maximum size of a single packet used by timeline. */ +#define PACKET_SIZE 4096 /* bytes */ + +/* The number of packets used by one timeline stream. */ +#define PACKET_COUNT 16 + +/* The number of bytes reserved for packet header. + * These value must be defined according to MIPE documentation. */ +#define PACKET_HEADER_SIZE 8 /* bytes */ + +/* The number of bytes reserved for packet sequence number. + * These value must be defined according to MIPE documentation. */ +#define PACKET_NUMBER_SIZE 4 /* bytes */ + +/* Packet header - first word. + * These values must be defined according to MIPE documentation. */ +#define PACKET_STREAMID_POS 0 +#define PACKET_STREAMID_LEN 8 +#define PACKET_RSVD1_POS (PACKET_STREAMID_POS + PACKET_STREAMID_LEN) +#define PACKET_RSVD1_LEN 8 +#define PACKET_TYPE_POS (PACKET_RSVD1_POS + PACKET_RSVD1_LEN) +#define PACKET_TYPE_LEN 3 +#define PACKET_CLASS_POS (PACKET_TYPE_POS + PACKET_TYPE_LEN) +#define PACKET_CLASS_LEN 7 +#define PACKET_FAMILY_POS (PACKET_CLASS_POS + PACKET_CLASS_LEN) +#define PACKET_FAMILY_LEN 6 + +/* Packet header - second word + * These values must be defined according to MIPE documentation. */ +#define PACKET_LENGTH_POS 0 +#define PACKET_LENGTH_LEN 24 +#define PACKET_SEQBIT_POS (PACKET_LENGTH_POS + PACKET_LENGTH_LEN) +#define PACKET_SEQBIT_LEN 1 +#define PACKET_RSVD2_POS (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN) +#define PACKET_RSVD2_LEN 7 + +/* Types of streams generated by timeline. + * Order is significant! Header streams must precede respective body streams. */ +enum tl_stream_type { + TL_STREAM_TYPE_OBJ_HEADER, + TL_STREAM_TYPE_OBJ_SUMMARY, + TL_STREAM_TYPE_OBJ, + TL_STREAM_TYPE_AUX_HEADER, + TL_STREAM_TYPE_AUX, + + TL_STREAM_TYPE_COUNT +}; + +/* Timeline packet family ids. + * Values are significant! Check MIPE documentation. */ +enum tl_packet_family { + TL_PACKET_FAMILY_CTRL = 0, /* control packets */ + TL_PACKET_FAMILY_TL = 1, /* timeline packets */ + + TL_PACKET_FAMILY_COUNT +}; + +/* Packet classes used in timeline streams. 
+ * Values are significant! Check MIPE documentation. */ +enum tl_packet_class { + TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */ + TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */ +}; + +/* Packet types used in timeline streams. + * Values are significant! Check MIPE documentation. */ +enum tl_packet_type { + TL_PACKET_TYPE_HEADER = 0, /* stream's header/directory */ + TL_PACKET_TYPE_BODY = 1, /* stream's body */ + TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */ +}; + +/* Message ids of trace events that are recorded in the timeline stream. */ +enum tl_msg_id_obj { + /* Timeline object events. */ + KBASE_TL_NEW_CTX, + KBASE_TL_NEW_GPU, + KBASE_TL_NEW_LPU, + KBASE_TL_NEW_ATOM, + KBASE_TL_NEW_AS, + KBASE_TL_DEL_CTX, + KBASE_TL_DEL_ATOM, + KBASE_TL_LIFELINK_LPU_GPU, + KBASE_TL_LIFELINK_AS_GPU, + KBASE_TL_RET_CTX_LPU, + KBASE_TL_RET_ATOM_CTX, + KBASE_TL_RET_ATOM_LPU, + KBASE_TL_NRET_CTX_LPU, + KBASE_TL_NRET_ATOM_CTX, + KBASE_TL_NRET_ATOM_LPU, + KBASE_TL_RET_AS_CTX, + KBASE_TL_NRET_AS_CTX, + KBASE_TL_RET_ATOM_AS, + KBASE_TL_NRET_ATOM_AS, + KBASE_TL_DEP_ATOM_ATOM, + KBASE_TL_NDEP_ATOM_ATOM, + KBASE_TL_RDEP_ATOM_ATOM, + KBASE_TL_ATTRIB_ATOM_CONFIG, + KBASE_TL_ATTRIB_ATOM_PRIORITY, + KBASE_TL_ATTRIB_ATOM_STATE, + KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE, + KBASE_TL_ATTRIB_ATOM_JIT, + KBASE_TL_ATTRIB_AS_CONFIG, + KBASE_TL_EVENT_LPU_SOFTSTOP, + KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, + KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, + + /* Job dump specific events. */ + KBASE_JD_GPU_SOFT_RESET +}; + +/* Message ids of trace events that are recorded in the auxiliary stream. */ +enum tl_msg_id_aux { + KBASE_AUX_PM_STATE, + KBASE_AUX_PAGEFAULT, + KBASE_AUX_PAGESALLOC, + KBASE_AUX_DEVFREQ_TARGET, + KBASE_AUX_PROTECTED_ENTER_START, + KBASE_AUX_PROTECTED_ENTER_END, + KBASE_AUX_PROTECTED_LEAVE_START, + KBASE_AUX_PROTECTED_LEAVE_END +}; + +/*****************************************************************************/ + +/** + * struct tl_stream - timeline stream structure + * @lock: message order lock + * @buffer: array of buffers + * @wbi: write buffer index + * @rbi: read buffer index + * @numbered: if non-zero stream's packets are sequentially numbered + * @autoflush_counter: counter tracking stream's autoflush state + * + * This structure holds information needed to construct proper packets in the + * timeline stream. Each message in sequence must bear timestamp that is greater + * to one in previous message in the same stream. For this reason lock is held + * throughout the process of message creation. Each stream contains set of + * buffers. Each buffer will hold one MIPE packet. In case there is no free + * space required to store incoming message the oldest buffer is discarded. + * Each packet in timeline body stream has sequence number embedded (this value + * must increment monotonically and is used by packets receiver to discover + * buffer overflows. + * Autoflush counter is set to negative number when there is no data pending + * for flush and it is set to zero on every update of the buffer. Autoflush + * timer will increment the counter by one on every expiry. In case there will + * be no activity on the buffer during two consecutive timer expiries, stream + * buffer will be flushed. 
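+ * + * As a short worked example of the autoflush rule above: with nothing pending the counter sits at a negative value; writing a message sets it to 0; the first timer expiry raises it to 1; if a second expiry arrives with no write in between, the stream buffer is flushed.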
+ */ +struct tl_stream { + spinlock_t lock; + + struct { + atomic_t size; /* number of bytes in buffer */ + char data[PACKET_SIZE]; /* buffer's data */ + } buffer[PACKET_COUNT]; + + atomic_t wbi; + atomic_t rbi; + + int numbered; + atomic_t autoflush_counter; +}; + +/** + * struct tp_desc - tracepoint message descriptor structure + * @id: tracepoint ID identifying message in stream + * @id_str: human readable version of tracepoint ID + * @name: tracepoint description + * @arg_types: tracepoint's arguments types declaration + * @arg_names: comma separated list of tracepoint's arguments names + */ +struct tp_desc { + u32 id; + const char *id_str; + const char *name; + const char *arg_types; + const char *arg_names; +}; + +/*****************************************************************************/ + +/* Configuration of timeline streams generated by kernel. + * Kernel emit only streams containing either timeline object events or + * auxiliary events. All streams have stream id value of 1 (as opposed to user + * space streams that have value of 0). */ +static const struct { + enum tl_packet_family pkt_family; + enum tl_packet_class pkt_class; + enum tl_packet_type pkt_type; + unsigned int stream_id; +} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = { + {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER, 1}, + {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1}, + {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1}, + {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER, 1}, + {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1} +}; + +/* The timeline streams generated by kernel. */ +static struct tl_stream *tl_stream[TL_STREAM_TYPE_COUNT]; + +/* Autoflush timer. */ +static struct timer_list autoflush_timer; + +/* If non-zero autoflush timer is active. */ +static atomic_t autoflush_timer_active; + +/* Reader lock. Only one reader is allowed to have access to the timeline + * streams at any given time. */ +static DEFINE_MUTEX(tl_reader_lock); + +/* Timeline stream event queue. */ +static DECLARE_WAIT_QUEUE_HEAD(tl_event_queue); + +/* The timeline stream file operations functions. */ +static ssize_t kbasep_tlstream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos); +static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait); +static int kbasep_tlstream_release(struct inode *inode, struct file *filp); + +/* The timeline stream file operations structure. */ +static const struct file_operations kbasep_tlstream_fops = { + .release = kbasep_tlstream_release, + .read = kbasep_tlstream_read, + .poll = kbasep_tlstream_poll, +}; + +/* Descriptors of timeline messages transmitted in object events stream. 
*/ +static const struct tp_desc tp_desc_obj[] = { + { + KBASE_TL_NEW_CTX, + __stringify(KBASE_TL_NEW_CTX), + "object ctx is created", + "@pII", + "ctx,ctx_nr,tgid" + }, + { + KBASE_TL_NEW_GPU, + __stringify(KBASE_TL_NEW_GPU), + "object gpu is created", + "@pII", + "gpu,gpu_id,core_count" + }, + { + KBASE_TL_NEW_LPU, + __stringify(KBASE_TL_NEW_LPU), + "object lpu is created", + "@pII", + "lpu,lpu_nr,lpu_fn" + }, + { + KBASE_TL_NEW_ATOM, + __stringify(KBASE_TL_NEW_ATOM), + "object atom is created", + "@pI", + "atom,atom_nr" + }, + { + KBASE_TL_NEW_AS, + __stringify(KBASE_TL_NEW_AS), + "address space object is created", + "@pI", + "address_space,as_nr" + }, + { + KBASE_TL_DEL_CTX, + __stringify(KBASE_TL_DEL_CTX), + "context is destroyed", + "@p", + "ctx" + }, + { + KBASE_TL_DEL_ATOM, + __stringify(KBASE_TL_DEL_ATOM), + "atom is destroyed", + "@p", + "atom" + }, + { + KBASE_TL_LIFELINK_LPU_GPU, + __stringify(KBASE_TL_LIFELINK_LPU_GPU), + "lpu is deleted with gpu", + "@pp", + "lpu,gpu" + }, + { + KBASE_TL_LIFELINK_AS_GPU, + __stringify(KBASE_TL_LIFELINK_AS_GPU), + "address space is deleted with gpu", + "@pp", + "address_space,gpu" + }, + { + KBASE_TL_RET_CTX_LPU, + __stringify(KBASE_TL_RET_CTX_LPU), + "context is retained by lpu", + "@pp", + "ctx,lpu" + }, + { + KBASE_TL_RET_ATOM_CTX, + __stringify(KBASE_TL_RET_ATOM_CTX), + "atom is retained by context", + "@pp", + "atom,ctx" + }, + { + KBASE_TL_RET_ATOM_LPU, + __stringify(KBASE_TL_RET_ATOM_LPU), + "atom is retained by lpu", + "@pps", + "atom,lpu,attrib_match_list" + }, + { + KBASE_TL_NRET_CTX_LPU, + __stringify(KBASE_TL_NRET_CTX_LPU), + "context is released by lpu", + "@pp", + "ctx,lpu" + }, + { + KBASE_TL_NRET_ATOM_CTX, + __stringify(KBASE_TL_NRET_ATOM_CTX), + "atom is released by context", + "@pp", + "atom,ctx" + }, + { + KBASE_TL_NRET_ATOM_LPU, + __stringify(KBASE_TL_NRET_ATOM_LPU), + "atom is released by lpu", + "@pp", + "atom,lpu" + }, + { + KBASE_TL_RET_AS_CTX, + __stringify(KBASE_TL_RET_AS_CTX), + "address space is retained by context", + "@pp", + "address_space,ctx" + }, + { + KBASE_TL_NRET_AS_CTX, + __stringify(KBASE_TL_NRET_AS_CTX), + "address space is released by context", + "@pp", + "address_space,ctx" + }, + { + KBASE_TL_RET_ATOM_AS, + __stringify(KBASE_TL_RET_ATOM_AS), + "atom is retained by address space", + "@pp", + "atom,address_space" + }, + { + KBASE_TL_NRET_ATOM_AS, + __stringify(KBASE_TL_NRET_ATOM_AS), + "atom is released by address space", + "@pp", + "atom,address_space" + }, + { + KBASE_TL_DEP_ATOM_ATOM, + __stringify(KBASE_TL_DEP_ATOM_ATOM), + "atom2 depends on atom1", + "@pp", + "atom1,atom2" + }, + { + KBASE_TL_NDEP_ATOM_ATOM, + __stringify(KBASE_TL_NDEP_ATOM_ATOM), + "atom2 no longer depends on atom1", + "@pp", + "atom1,atom2" + }, + { + KBASE_TL_RDEP_ATOM_ATOM, + __stringify(KBASE_TL_RDEP_ATOM_ATOM), + "resolved dependecy of atom2 depending on atom1", + "@pp", + "atom1,atom2" + }, + { + KBASE_TL_ATTRIB_ATOM_CONFIG, + __stringify(KBASE_TL_ATTRIB_ATOM_CONFIG), + "atom job slot attributes", + "@pLLI", + "atom,descriptor,affinity,config" + }, + { + KBASE_TL_ATTRIB_ATOM_PRIORITY, + __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY), + "atom priority", + "@pI", + "atom,prio" + }, + { + KBASE_TL_ATTRIB_ATOM_STATE, + __stringify(KBASE_TL_ATTRIB_ATOM_STATE), + "atom state", + "@pI", + "atom,state" + }, + { + KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE, + __stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE), + "atom caused priority change", + "@p", + "atom" + }, + { + KBASE_TL_ATTRIB_ATOM_JIT, + __stringify(KBASE_TL_ATTRIB_ATOM_JIT), + "jit 
done for atom", + "@pLL", + "atom,edit_addr,new_addr" + }, + { + KBASE_TL_ATTRIB_AS_CONFIG, + __stringify(KBASE_TL_ATTRIB_AS_CONFIG), + "address space attributes", + "@pLLL", + "address_space,transtab,memattr,transcfg" + }, + { + KBASE_TL_EVENT_LPU_SOFTSTOP, + __stringify(KBASE_TL_EVENT_LPU_SOFTSTOP), + "softstop event on given lpu", + "@p", + "lpu" + }, + { + KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, + __stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX), + "atom softstopped", + "@p", + "atom" + }, + { + KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, + __stringify(KBASE_TL_EVENT_SOFTSTOP_ISSUE), + "atom softstop issued", + "@p", + "atom" + }, + { + KBASE_JD_GPU_SOFT_RESET, + __stringify(KBASE_JD_GPU_SOFT_RESET), + "gpu soft reset", + "@p", + "gpu" + }, +}; + +/* Descriptors of timeline messages transmitted in auxiliary events stream. */ +static const struct tp_desc tp_desc_aux[] = { + { + KBASE_AUX_PM_STATE, + __stringify(KBASE_AUX_PM_STATE), + "PM state", + "@IL", + "core_type,core_state_bitset" + }, + { + KBASE_AUX_PAGEFAULT, + __stringify(KBASE_AUX_PAGEFAULT), + "Page fault", + "@IL", + "ctx_nr,page_cnt_change" + }, + { + KBASE_AUX_PAGESALLOC, + __stringify(KBASE_AUX_PAGESALLOC), + "Total alloc pages change", + "@IL", + "ctx_nr,page_cnt" + }, + { + KBASE_AUX_DEVFREQ_TARGET, + __stringify(KBASE_AUX_DEVFREQ_TARGET), + "New device frequency target", + "@L", + "target_freq" + }, + { + KBASE_AUX_PROTECTED_ENTER_START, + __stringify(KBASE_AUX_PROTECTED_ENTER_START), + "enter protected mode start", + "@p", + "gpu" + }, + { + KBASE_AUX_PROTECTED_ENTER_END, + __stringify(KBASE_AUX_PROTECTED_ENTER_END), + "enter protected mode end", + "@p", + "gpu" + }, + { + KBASE_AUX_PROTECTED_LEAVE_START, + __stringify(KBASE_AUX_PROTECTED_LEAVE_START), + "leave protected mode start", + "@p", + "gpu" + }, + { + KBASE_AUX_PROTECTED_LEAVE_END, + __stringify(KBASE_AUX_PROTECTED_LEAVE_END), + "leave protected mode end", + "@p", + "gpu" + } +}; + +#if MALI_UNIT_TEST +/* Number of bytes read by user. */ +static atomic_t tlstream_bytes_collected = {0}; + +/* Number of bytes generated by tracepoint messages. */ +static atomic_t tlstream_bytes_generated = {0}; +#endif /* MALI_UNIT_TEST */ + +/*****************************************************************************/ + +/* Indicator of whether the timeline stream file descriptor is used. */ +atomic_t kbase_tlstream_enabled = {0}; + +/*****************************************************************************/ + +/** + * kbasep_tlstream_get_timestamp - return timestamp + * + * Function returns timestamp value based on raw monotonic timer. Value will + * wrap around zero in case of overflow. 
+ * Return: timestamp value
+ */
+static u64 kbasep_tlstream_get_timestamp(void)
+{
+	u64 timestamp;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	struct timespec64 ts;
+	ktime_get_raw_ts64(&ts);
+#else
+	struct timespec ts;
+	getrawmonotonic(&ts);
+#endif
+	timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+	return timestamp;
+}
+
+/**
+ * kbasep_tlstream_write_bytes - write data to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @bytes: pointer to buffer holding data
+ * @len: length of data to be written
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_bytes(
+		char *buffer,
+		size_t pos,
+		const void *bytes,
+		size_t len)
+{
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(bytes);
+
+	memcpy(&buffer[pos], bytes, len);
+
+	return pos + len;
+}
+
+/**
+ * kbasep_tlstream_write_string - write string to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ * @string: pointer to buffer holding the source string
+ * @max_write_size: number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_string(
+		char *buffer,
+		size_t pos,
+		const char *string,
+		size_t max_write_size)
+{
+	u32 string_len;
+
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(string);
+	/* Timeline string consists of at least string length and nul
+	 * terminator. */
+	KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+	max_write_size -= sizeof(string_len);
+
+	string_len = strlcpy(
+			&buffer[pos + sizeof(string_len)],
+			string,
+			max_write_size);
+	string_len += sizeof(char);
+
+	/* Make sure that the source string fits into the buffer. */
+	KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+	/* Update string length. */
+	memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+	return pos + sizeof(string_len) + string_len;
+}
+
+/**
+ * kbasep_tlstream_write_timestamp - write timestamp to message buffer
+ * @buffer: buffer where data will be written
+ * @pos: position in the buffer where to place data
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos)
+{
+	u64 timestamp = kbasep_tlstream_get_timestamp();
+
+	return kbasep_tlstream_write_bytes(
+			buffer, pos,
+			&timestamp, sizeof(timestamp));
+}
+
+/**
+ * kbasep_tlstream_put_bits - put bits in a word
+ * @word: pointer to the word being modified
+ * @value: value that shall be written to given position
+ * @bitpos: position where value shall be written (in bits)
+ * @bitlen: length of value (in bits)
+ */
+static void kbasep_tlstream_put_bits(
+		u32 *word,
+		u32 value,
+		unsigned int bitpos,
+		unsigned int bitlen)
+{
+	const u32 mask = ((1 << bitlen) - 1) << bitpos;
+
+	KBASE_DEBUG_ASSERT(word);
+	KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen));
+	KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32);
+
+	*word &= ~mask;
+	*word |= ((value << bitpos) & mask);
+}
+
+/**
+ * kbasep_tlstream_packet_header_setup - setup the packet header
+ * @buffer: pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type: packet's type
+ * @pkt_class: packet's class
+ * @stream_id: stream id
+ * @numbered: non-zero if this stream is numbered
+ *
+ * Function sets up the immutable part of the packet header in the given
+ * buffer.
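+ *
+ * The header occupies two 32-bit words: word0 packs the packet family,
+ * class, type and stream id using kbasep_tlstream_put_bits(); word1 carries
+ * the sequence-number flag set here and, for every submitted packet, the
+ * payload length filled in later by kbasep_tlstream_packet_header_update().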
+ */ +static void kbasep_tlstream_packet_header_setup( + char *buffer, + enum tl_packet_family pkt_family, + enum tl_packet_class pkt_class, + enum tl_packet_type pkt_type, + unsigned int stream_id, + int numbered) +{ + u32 word0 = 0; + u32 word1 = 0; + + KBASE_DEBUG_ASSERT(buffer); + KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL); + KBASE_DEBUG_ASSERT( + (pkt_type == TL_PACKET_TYPE_HEADER) || + (pkt_type == TL_PACKET_TYPE_SUMMARY) || + (pkt_type == TL_PACKET_TYPE_BODY)); + KBASE_DEBUG_ASSERT( + (pkt_class == TL_PACKET_CLASS_OBJ) || + (pkt_class == TL_PACKET_CLASS_AUX)); + + kbasep_tlstream_put_bits( + &word0, pkt_family, + PACKET_FAMILY_POS, PACKET_FAMILY_LEN); + kbasep_tlstream_put_bits( + &word0, pkt_class, + PACKET_CLASS_POS, PACKET_CLASS_LEN); + kbasep_tlstream_put_bits( + &word0, pkt_type, + PACKET_TYPE_POS, PACKET_TYPE_LEN); + kbasep_tlstream_put_bits( + &word0, stream_id, + PACKET_STREAMID_POS, PACKET_STREAMID_LEN); + + if (numbered) + kbasep_tlstream_put_bits( + &word1, 1, + PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN); + + memcpy(&buffer[0], &word0, sizeof(word0)); + memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1)); +} + +/** + * kbasep_tlstream_packet_header_update - update the packet header + * @buffer: pointer to the buffer + * @data_size: amount of data carried in this packet + * + * Function updates mutable part of packet header in the given buffer. + * Note that value of data_size must not including size of the header. + */ +static void kbasep_tlstream_packet_header_update( + char *buffer, + size_t data_size) +{ + u32 word0; + u32 word1; + + KBASE_DEBUG_ASSERT(buffer); + CSTD_UNUSED(word0); + + memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1)); + + kbasep_tlstream_put_bits( + &word1, data_size, + PACKET_LENGTH_POS, PACKET_LENGTH_LEN); + + memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1)); +} + +/** + * kbasep_tlstream_packet_number_update - update the packet number + * @buffer: pointer to the buffer + * @counter: value of packet counter for this packet's stream + * + * Function updates packet number embedded within the packet placed in the + * given buffer. + */ +static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter) +{ + KBASE_DEBUG_ASSERT(buffer); + + memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter)); +} + +/** + * kbasep_timeline_stream_reset - reset stream + * @stream: pointer to the stream structure + * + * Function discards all pending messages and resets packet counters. + */ +static void kbasep_timeline_stream_reset(struct tl_stream *stream) +{ + unsigned int i; + + for (i = 0; i < PACKET_COUNT; i++) { + if (stream->numbered) + atomic_set( + &stream->buffer[i].size, + PACKET_HEADER_SIZE + + PACKET_NUMBER_SIZE); + else + atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE); + } + + atomic_set(&stream->wbi, 0); + atomic_set(&stream->rbi, 0); +} + +/** + * kbasep_timeline_stream_init - initialize timeline stream + * @stream: pointer to the stream structure + * @stream_type: stream type + */ +static void kbasep_timeline_stream_init( + struct tl_stream *stream, + enum tl_stream_type stream_type) +{ + unsigned int i; + + KBASE_DEBUG_ASSERT(stream); + KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); + + spin_lock_init(&stream->lock); + + /* All packets carrying tracepoints shall be numbered. 
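+	 * Numbering presumably lets the stream reader detect packets lost to
+	 * ring buffer overflow: body packets carry a sequence number written
+	 * by kbasep_tlstream_packet_number_update(), while header and summary
+	 * packets do not.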
*/ + if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type) + stream->numbered = 1; + else + stream->numbered = 0; + + for (i = 0; i < PACKET_COUNT; i++) + kbasep_tlstream_packet_header_setup( + stream->buffer[i].data, + tl_stream_cfg[stream_type].pkt_family, + tl_stream_cfg[stream_type].pkt_class, + tl_stream_cfg[stream_type].pkt_type, + tl_stream_cfg[stream_type].stream_id, + stream->numbered); + + kbasep_timeline_stream_reset(tl_stream[stream_type]); +} + +/** + * kbasep_timeline_stream_term - terminate timeline stream + * @stream: pointer to the stream structure + */ +static void kbasep_timeline_stream_term(struct tl_stream *stream) +{ + KBASE_DEBUG_ASSERT(stream); +} + +/** + * kbasep_tlstream_msgbuf_submit - submit packet to the user space + * @stream: pointer to the stream structure + * @wb_idx_raw: write buffer index + * @wb_size: length of data stored in current buffer + * + * Function updates currently written buffer with packet header. Then write + * index is incremented and buffer is handled to user space. Parameters + * of new buffer are returned using provided arguments. + * + * Return: length of data in new buffer + * + * Warning: User must update the stream structure with returned value. + */ +static size_t kbasep_tlstream_msgbuf_submit( + struct tl_stream *stream, + unsigned int wb_idx_raw, + unsigned int wb_size) +{ + unsigned int rb_idx_raw = atomic_read(&stream->rbi); + unsigned int wb_idx = wb_idx_raw % PACKET_COUNT; + + /* Set stream as flushed. */ + atomic_set(&stream->autoflush_counter, -1); + + kbasep_tlstream_packet_header_update( + stream->buffer[wb_idx].data, + wb_size - PACKET_HEADER_SIZE); + + if (stream->numbered) + kbasep_tlstream_packet_number_update( + stream->buffer[wb_idx].data, + wb_idx_raw); + + /* Increasing write buffer index will expose this packet to the reader. + * As stream->lock is not taken on reader side we must make sure memory + * is updated correctly before this will happen. */ + smp_wmb(); + wb_idx_raw++; + atomic_set(&stream->wbi, wb_idx_raw); + + /* Inform user that packets are ready for reading. */ + wake_up_interruptible(&tl_event_queue); + + /* Detect and mark overflow in this stream. */ + if (PACKET_COUNT == wb_idx_raw - rb_idx_raw) { + /* Reader side depends on this increment to correctly handle + * overflows. The value shall be updated only if it was not + * modified by the reader. The data holding buffer will not be + * updated before stream->lock is released, however size of the + * buffer will. Make sure this increment is globally visible + * before information about selected write buffer size. */ + atomic_cmpxchg(&stream->rbi, rb_idx_raw, rb_idx_raw + 1); + } + + wb_size = PACKET_HEADER_SIZE; + if (stream->numbered) + wb_size += PACKET_NUMBER_SIZE; + + return wb_size; +} + +/** + * kbasep_tlstream_msgbuf_acquire - lock selected stream and reserves buffer + * @stream_type: type of the stream that shall be locked + * @msg_size: message size + * @flags: pointer to store flags passed back on stream release + * + * Function will lock the stream and reserve the number of bytes requested + * in msg_size for the user. + * + * Return: pointer to the buffer where message can be stored + * + * Warning: Stream must be released with kbasep_tlstream_msgbuf_release(). + * Only atomic operations are allowed while stream is locked + * (i.e. do not use any operation that may sleep). 
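+ *
+ * A typical writer (see the __kbase_tlstream_* helpers later in this file)
+ * follows the pattern:
+ *
+ *   buffer = kbasep_tlstream_msgbuf_acquire(TL_STREAM_TYPE_OBJ, msg_size, &flags);
+ *   pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ *   pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ *   ...remaining payload...
+ *   kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);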
+ */ +static char *kbasep_tlstream_msgbuf_acquire( + enum tl_stream_type stream_type, + size_t msg_size, + unsigned long *flags) __acquires(&stream->lock) +{ + struct tl_stream *stream; + unsigned int wb_idx_raw; + unsigned int wb_idx; + size_t wb_size; + + KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); + KBASE_DEBUG_ASSERT( + PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >= + msg_size); + + stream = tl_stream[stream_type]; + + spin_lock_irqsave(&stream->lock, *flags); + + wb_idx_raw = atomic_read(&stream->wbi); + wb_idx = wb_idx_raw % PACKET_COUNT; + wb_size = atomic_read(&stream->buffer[wb_idx].size); + + /* Select next buffer if data will not fit into current one. */ + if (PACKET_SIZE < wb_size + msg_size) { + wb_size = kbasep_tlstream_msgbuf_submit( + stream, wb_idx_raw, wb_size); + wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; + } + + /* Reserve space in selected buffer. */ + atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size); + +#if MALI_UNIT_TEST + atomic_add(msg_size, &tlstream_bytes_generated); +#endif /* MALI_UNIT_TEST */ + + return &stream->buffer[wb_idx].data[wb_size]; +} + +/** + * kbasep_tlstream_msgbuf_release - unlock selected stream + * @stream_type: type of the stream that shall be locked + * @flags: value obtained during stream acquire + * + * Function releases stream that has been previously locked with a call to + * kbasep_tlstream_msgbuf_acquire(). + */ +static void kbasep_tlstream_msgbuf_release( + enum tl_stream_type stream_type, + unsigned long flags) __releases(&stream->lock) +{ + struct tl_stream *stream; + + KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); + + stream = tl_stream[stream_type]; + + /* Mark stream as containing unflushed data. */ + atomic_set(&stream->autoflush_counter, 0); + + spin_unlock_irqrestore(&stream->lock, flags); +} + +/*****************************************************************************/ + +/** + * kbasep_tlstream_flush_stream - flush stream + * @stype: type of stream to be flushed + * + * Flush pending data in timeline stream. + */ +static void kbasep_tlstream_flush_stream(enum tl_stream_type stype) +{ + struct tl_stream *stream = tl_stream[stype]; + unsigned long flags; + unsigned int wb_idx_raw; + unsigned int wb_idx; + size_t wb_size; + size_t min_size = PACKET_HEADER_SIZE; + + if (stream->numbered) + min_size += PACKET_NUMBER_SIZE; + + spin_lock_irqsave(&stream->lock, flags); + + wb_idx_raw = atomic_read(&stream->wbi); + wb_idx = wb_idx_raw % PACKET_COUNT; + wb_size = atomic_read(&stream->buffer[wb_idx].size); + + if (wb_size > min_size) { + wb_size = kbasep_tlstream_msgbuf_submit( + stream, wb_idx_raw, wb_size); + wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; + atomic_set(&stream->buffer[wb_idx].size, wb_size); + } + spin_unlock_irqrestore(&stream->lock, flags); +} + +/** + * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback + * @data: unused + * + * Timer is executed periodically to check if any of the stream contains + * buffer ready to be submitted to user space. + */ +static void kbasep_tlstream_autoflush_timer_callback(struct timer_list *unused) +{ + enum tl_stream_type stype; + int rcode; + + for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) { + struct tl_stream *stream = tl_stream[stype]; + unsigned long flags; + unsigned int wb_idx_raw; + unsigned int wb_idx; + size_t wb_size; + size_t min_size = PACKET_HEADER_SIZE; + + int af_cnt = atomic_read(&stream->autoflush_counter); + + /* Check if stream contain unflushed data. 
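+		 * The counter encodes the stream state:
+		 *  -1 - stream was flushed when its buffer was last submitted
+		 *       (see kbasep_tlstream_msgbuf_submit()),
+		 *   0 - unflushed data has been written since then
+		 *       (see kbasep_tlstream_msgbuf_release()),
+		 *  >0 - unflushed data has survived at least one timer period
+		 *       and is flushed below.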
*/ + if (0 > af_cnt) + continue; + + /* Check if stream should be flushed now. */ + if (af_cnt != atomic_cmpxchg( + &stream->autoflush_counter, + af_cnt, + af_cnt + 1)) + continue; + if (!af_cnt) + continue; + + /* Autoflush this stream. */ + if (stream->numbered) + min_size += PACKET_NUMBER_SIZE; + + spin_lock_irqsave(&stream->lock, flags); + + wb_idx_raw = atomic_read(&stream->wbi); + wb_idx = wb_idx_raw % PACKET_COUNT; + wb_size = atomic_read(&stream->buffer[wb_idx].size); + + if (wb_size > min_size) { + wb_size = kbasep_tlstream_msgbuf_submit( + stream, wb_idx_raw, wb_size); + wb_idx = (wb_idx_raw + 1) % PACKET_COUNT; + atomic_set(&stream->buffer[wb_idx].size, + wb_size); + } + spin_unlock_irqrestore(&stream->lock, flags); + } + + if (atomic_read(&autoflush_timer_active)) + rcode = mod_timer( + &autoflush_timer, + jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL)); + CSTD_UNUSED(rcode); +} + +/** + * kbasep_tlstream_packet_pending - check timeline streams for pending packets + * @stype: pointer to variable where stream type will be placed + * @rb_idx_raw: pointer to variable where read buffer index will be placed + * + * Function checks all streams for pending packets. It will stop as soon as + * packet ready to be submitted to user space is detected. Variables under + * pointers, passed as the parameters to this function will be updated with + * values pointing to right stream and buffer. + * + * Return: non-zero if any of timeline streams has at last one packet ready + */ +static int kbasep_tlstream_packet_pending( + enum tl_stream_type *stype, + unsigned int *rb_idx_raw) +{ + int pending = 0; + + KBASE_DEBUG_ASSERT(stype); + KBASE_DEBUG_ASSERT(rb_idx_raw); + + for ( + *stype = 0; + (*stype < TL_STREAM_TYPE_COUNT) && !pending; + (*stype)++) { + if (NULL != tl_stream[*stype]) { + *rb_idx_raw = atomic_read(&tl_stream[*stype]->rbi); + /* Read buffer index may be updated by writer in case of + * overflow. Read and write buffer indexes must be + * loaded in correct order. */ + smp_rmb(); + if (atomic_read(&tl_stream[*stype]->wbi) != *rb_idx_raw) + pending = 1; + } + } + (*stype)--; + + return pending; +} + +/** + * kbasep_tlstream_read - copy data from streams to buffer provided by user + * @filp: pointer to file structure (unused) + * @buffer: pointer to the buffer provided by user + * @size: maximum amount of data that can be stored in the buffer + * @f_pos: pointer to file offset (unused) + * + * Return: number of bytes stored in the buffer + */ +static ssize_t kbasep_tlstream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos) +{ + ssize_t copy_len = 0; + + KBASE_DEBUG_ASSERT(filp); + KBASE_DEBUG_ASSERT(f_pos); + + if (!buffer) + return -EINVAL; + + if ((0 > *f_pos) || (PACKET_SIZE > size)) + return -EINVAL; + + mutex_lock(&tl_reader_lock); + + while (copy_len < size) { + enum tl_stream_type stype; + unsigned int rb_idx_raw = 0; + unsigned int rb_idx; + size_t rb_size; + + /* If we don't have any data yet, wait for packet to be + * submitted. If we already read some packets and there is no + * packet pending return back to user. */ + if (0 < copy_len) { + if (!kbasep_tlstream_packet_pending( + &stype, + &rb_idx_raw)) + break; + } else { + if (wait_event_interruptible( + tl_event_queue, + kbasep_tlstream_packet_pending( + &stype, + &rb_idx_raw))) { + copy_len = -ERESTARTSYS; + break; + } + } + + /* Check if this packet fits into the user buffer. + * If so copy its content. 
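+		 * Packets are copied whole; one that does not fit in the
+		 * remaining space ends this read, which is why the user buffer
+		 * must be at least PACKET_SIZE bytes (checked above).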
*/ + rb_idx = rb_idx_raw % PACKET_COUNT; + rb_size = atomic_read(&tl_stream[stype]->buffer[rb_idx].size); + if (rb_size > size - copy_len) + break; + if (copy_to_user( + &buffer[copy_len], + tl_stream[stype]->buffer[rb_idx].data, + rb_size)) { + copy_len = -EFAULT; + break; + } + + /* If the rbi still points to the packet we just processed + * then there was no overflow so we add the copied size to + * copy_len and move rbi on to the next packet + */ + smp_rmb(); + if (atomic_read(&tl_stream[stype]->rbi) == rb_idx_raw) { + copy_len += rb_size; + atomic_inc(&tl_stream[stype]->rbi); + +#if MALI_UNIT_TEST + atomic_add(rb_size, &tlstream_bytes_collected); +#endif /* MALI_UNIT_TEST */ + } + } + + mutex_unlock(&tl_reader_lock); + + return copy_len; +} + +/** + * kbasep_tlstream_poll - poll timeline stream for packets + * @filp: pointer to file structure + * @wait: pointer to poll table + * Return: POLLIN if data can be read without blocking, otherwise zero + */ +static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait) +{ + enum tl_stream_type stream_type; + unsigned int rb_idx; + + KBASE_DEBUG_ASSERT(filp); + KBASE_DEBUG_ASSERT(wait); + + poll_wait(filp, &tl_event_queue, wait); + if (kbasep_tlstream_packet_pending(&stream_type, &rb_idx)) + return POLLIN; + return 0; +} + +/** + * kbasep_tlstream_release - release timeline stream descriptor + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return always return zero + */ +static int kbasep_tlstream_release(struct inode *inode, struct file *filp) +{ + KBASE_DEBUG_ASSERT(inode); + KBASE_DEBUG_ASSERT(filp); + CSTD_UNUSED(inode); + CSTD_UNUSED(filp); + + /* Stop autoflush timer before releasing access to streams. */ + atomic_set(&autoflush_timer_active, 0); + del_timer_sync(&autoflush_timer); + + atomic_set(&kbase_tlstream_enabled, 0); + return 0; +} + +/** + * kbasep_tlstream_timeline_header - prepare timeline header stream packet + * @stream_type: type of the stream that will carry header data + * @tp_desc: pointer to array with tracepoint descriptors + * @tp_count: number of descriptors in the given array + * + * Functions fills in information about tracepoints stored in body stream + * associated with this header stream. + */ +static void kbasep_tlstream_timeline_header( + enum tl_stream_type stream_type, + const struct tp_desc *tp_desc, + u32 tp_count) +{ + const u8 tv = SWTRACE_VERSION; /* protocol version */ + const u8 ps = sizeof(void *); /* pointer size */ + size_t msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count); + char *buffer; + size_t pos = 0; + unsigned long flags; + unsigned int i; + + KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type); + KBASE_DEBUG_ASSERT(tp_desc); + + /* Calculate the size of the timeline message. 
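+	 * The header message consists of the protocol version (1 byte), the
+	 * pointer size (1 byte), the tracepoint count (u32) and then, for each
+	 * tracepoint, its numeric id followed by four length-prefixed strings
+	 * (id, name, argument types, argument names) emitted with
+	 * kbasep_tlstream_write_string().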
*/ + for (i = 0; i < tp_count; i++) { + msg_size += sizeof(tp_desc[i].id); + msg_size += + strnlen(tp_desc[i].id_str, STRLEN_MAX) + + sizeof(char) + sizeof(u32); + msg_size += + strnlen(tp_desc[i].name, STRLEN_MAX) + + sizeof(char) + sizeof(u32); + msg_size += + strnlen(tp_desc[i].arg_types, STRLEN_MAX) + + sizeof(char) + sizeof(u32); + msg_size += + strnlen(tp_desc[i].arg_names, STRLEN_MAX) + + sizeof(char) + sizeof(u32); + } + + KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size); + + buffer = kbasep_tlstream_msgbuf_acquire(stream_type, msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv)); + pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &tp_count, sizeof(tp_count)); + + for (i = 0; i < tp_count; i++) { + pos = kbasep_tlstream_write_bytes( + buffer, pos, + &tp_desc[i].id, sizeof(tp_desc[i].id)); + pos = kbasep_tlstream_write_string( + buffer, pos, + tp_desc[i].id_str, msg_size - pos); + pos = kbasep_tlstream_write_string( + buffer, pos, + tp_desc[i].name, msg_size - pos); + pos = kbasep_tlstream_write_string( + buffer, pos, + tp_desc[i].arg_types, msg_size - pos); + pos = kbasep_tlstream_write_string( + buffer, pos, + tp_desc[i].arg_names, msg_size - pos); + } + + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(stream_type, flags); + + /* We don't expect any more data to be read in this stream. + * As header stream must be read before its associated body stream, + * make this packet visible to the user straightaway. */ + kbasep_tlstream_flush_stream(stream_type); +} + +/*****************************************************************************/ + +int kbase_tlstream_init(void) +{ + enum tl_stream_type i; + + /* Prepare stream structures. */ + for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) { + tl_stream[i] = kmalloc(sizeof(**tl_stream), GFP_KERNEL); + if (!tl_stream[i]) + break; + kbasep_timeline_stream_init(tl_stream[i], i); + } + if (TL_STREAM_TYPE_COUNT > i) { + for (; i > 0; i--) { + kbasep_timeline_stream_term(tl_stream[i - 1]); + kfree(tl_stream[i - 1]); + } + return -ENOMEM; + } + + /* Initialize autoflush timer. */ + atomic_set(&autoflush_timer_active, 0); + timer_setup(&autoflush_timer, + kbasep_tlstream_autoflush_timer_callback, + 0); + + return 0; +} + +void kbase_tlstream_term(void) +{ + enum tl_stream_type i; + + for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) { + kbasep_timeline_stream_term(tl_stream[i]); + kfree(tl_stream[i]); + } +} + +static void kbase_create_timeline_objects(struct kbase_context *kctx) +{ + struct kbase_device *kbdev = kctx->kbdev; + unsigned int lpu_id; + unsigned int as_nr; + struct kbasep_kctx_list_element *element; + + /* Create LPU objects. */ + for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) { + u32 *lpu = + &kbdev->gpu_props.props.raw_props.js_features[lpu_id]; + KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, lpu_id, *lpu); + } + + /* Create Address Space objects. */ + for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++) + KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(&kbdev->as[as_nr], as_nr); + + /* Create GPU object and make it retain all LPUs and address spaces. 
*/ + KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU( + kbdev, + kbdev->gpu_props.props.raw_props.gpu_id, + kbdev->gpu_props.num_cores); + + for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) { + void *lpu = + &kbdev->gpu_props.props.raw_props.js_features[lpu_id]; + KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, kbdev); + } + for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++) + KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU( + &kbdev->as[as_nr], + kbdev); + + /* Create object for each known context. */ + mutex_lock(&kbdev->kctx_list_lock); + list_for_each_entry(element, &kbdev->kctx_list, link) { + KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX( + element->kctx, + element->kctx->id, + (u32)(element->kctx->tgid)); + } + /* Before releasing the lock, reset body stream buffers. + * This will prevent context creation message to be directed to both + * summary and body stream. + */ + kbase_tlstream_reset_body_streams(); + mutex_unlock(&kbdev->kctx_list_lock); + /* Static object are placed into summary packet that needs to be + * transmitted first. Flush all streams to make it available to + * user space. + */ + kbase_tlstream_flush_streams(); +} + +int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags) +{ + int ret; + u32 tlstream_enabled = TLSTREAM_ENABLED | flags; + + if (0 == atomic_cmpxchg(&kbase_tlstream_enabled, 0, tlstream_enabled)) { + int rcode; + + ret = anon_inode_getfd( + "[mali_tlstream]", + &kbasep_tlstream_fops, + kctx, + O_RDONLY | O_CLOEXEC); + if (ret < 0) { + atomic_set(&kbase_tlstream_enabled, 0); + return ret; + } + + /* Reset and initialize header streams. */ + kbasep_timeline_stream_reset( + tl_stream[TL_STREAM_TYPE_OBJ_HEADER]); + kbasep_timeline_stream_reset( + tl_stream[TL_STREAM_TYPE_OBJ_SUMMARY]); + kbasep_timeline_stream_reset( + tl_stream[TL_STREAM_TYPE_AUX_HEADER]); + kbasep_tlstream_timeline_header( + TL_STREAM_TYPE_OBJ_HEADER, + tp_desc_obj, + ARRAY_SIZE(tp_desc_obj)); + kbasep_tlstream_timeline_header( + TL_STREAM_TYPE_AUX_HEADER, + tp_desc_aux, + ARRAY_SIZE(tp_desc_aux)); + + /* Start autoflush timer. */ + atomic_set(&autoflush_timer_active, 1); + rcode = mod_timer( + &autoflush_timer, + jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL)); + CSTD_UNUSED(rcode); + + /* If job dumping is enabled, readjust the software event's + * timeout as the default value of 3 seconds is often + * insufficient. */ + if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) { + dev_info(kctx->kbdev->dev, + "Job dumping is enabled, readjusting the software event's timeout\n"); + atomic_set(&kctx->kbdev->js_data.soft_job_timeout_ms, + 1800000); + } + + /* Summary stream was cleared during acquire. + * Create static timeline objects that will be + * read by client. 
+ */ + kbase_create_timeline_objects(kctx); + + } else { + ret = -EBUSY; + } + + return ret; +} + +void kbase_tlstream_flush_streams(void) +{ + enum tl_stream_type stype; + + for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) + kbasep_tlstream_flush_stream(stype); +} + +void kbase_tlstream_reset_body_streams(void) +{ + kbasep_timeline_stream_reset( + tl_stream[TL_STREAM_TYPE_OBJ]); + kbasep_timeline_stream_reset( + tl_stream[TL_STREAM_TYPE_AUX]); +} + +#if MALI_UNIT_TEST +void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated) +{ + KBASE_DEBUG_ASSERT(bytes_collected); + KBASE_DEBUG_ASSERT(bytes_generated); + *bytes_collected = atomic_read(&tlstream_bytes_collected); + *bytes_generated = atomic_read(&tlstream_bytes_generated); +} +#endif /* MALI_UNIT_TEST */ + +/*****************************************************************************/ + +void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid) +{ + const u32 msg_id = KBASE_TL_NEW_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) + + sizeof(tgid); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &nr, sizeof(nr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &tgid, sizeof(tgid)); + + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count) +{ + const u32 msg_id = KBASE_TL_NEW_GPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) + + sizeof(core_count); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &id, sizeof(id)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &core_count, sizeof(core_count)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn) +{ + const u32 msg_id = KBASE_TL_NEW_LPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) + + sizeof(fn); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &nr, sizeof(nr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &fn, sizeof(fn)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void 
*gpu) +{ + const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr) +{ + const u32 msg_id = KBASE_TL_NEW_AS; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &nr, sizeof(nr)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu) +{ + const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ_SUMMARY, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags); +} + +/*****************************************************************************/ + +void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid) +{ + const u32 msg_id = KBASE_TL_NEW_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) + + sizeof(tgid); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &nr, sizeof(nr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &tgid, sizeof(tgid)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_new_atom(void *atom, u32 nr) +{ + const u32 msg_id = KBASE_TL_NEW_ATOM; + const size_t msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(atom) + + sizeof(nr); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = 
kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &nr, sizeof(nr)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_del_ctx(void *context) +{ + const u32 msg_id = KBASE_TL_DEL_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(context); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_del_atom(void *atom) +{ + const u32 msg_id = KBASE_TL_DEL_ATOM; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu) +{ + const u32 msg_id = KBASE_TL_RET_CTX_LPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context) +{ + const u32 msg_id = KBASE_TL_RET_ATOM_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ret_atom_lpu( + void *atom, void *lpu, const char *attrib_match_list) +{ + const u32 msg_id = KBASE_TL_RET_ATOM_LPU; + const size_t msg_s0 = sizeof(u32) + sizeof(char) + + strnlen(attrib_match_list, STRLEN_MAX); + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + + sizeof(atom) + sizeof(lpu) + msg_s0; 
+ unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + pos = kbasep_tlstream_write_string( + buffer, pos, attrib_match_list, msg_s0); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu) +{ + const u32 msg_id = KBASE_TL_NRET_CTX_LPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context) +{ + const u32 msg_id = KBASE_TL_NRET_ATOM_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &context, sizeof(context)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2) +{ + const u32 msg_id = KBASE_TL_DEP_ATOM_ATOM; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom1, sizeof(atom1)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom2, sizeof(atom2)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2) +{ + const u32 msg_id = KBASE_TL_NDEP_ATOM_ATOM; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom1, sizeof(atom1)); + pos = 
kbasep_tlstream_write_bytes( + buffer, pos, &atom2, sizeof(atom2)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2) +{ + const u32 msg_id = KBASE_TL_RDEP_ATOM_ATOM; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom1) + sizeof(atom2); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom1, sizeof(atom1)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom2, sizeof(atom2)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu) +{ + const u32 msg_id = KBASE_TL_NRET_ATOM_LPU; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx) +{ + const u32 msg_id = KBASE_TL_RET_AS_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &ctx, sizeof(ctx)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx) +{ + const u32 msg_id = KBASE_TL_NRET_AS_CTX; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &ctx, sizeof(ctx)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as) +{ + const u32 msg_id = KBASE_TL_RET_ATOM_AS; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + 
KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as) +{ + const u32 msg_id = KBASE_TL_NRET_ATOM_AS; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_atom_config( + void *atom, u64 jd, u64 affinity, u32 config) +{ + const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + + sizeof(jd) + sizeof(affinity) + sizeof(config); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &jd, sizeof(jd)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &affinity, sizeof(affinity)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &config, sizeof(config)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio) +{ + const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(prio); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &prio, sizeof(prio)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state) +{ + const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(state); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + 
buffer, pos, &state, sizeof(state)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom) +{ + const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY_CHANGE; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_atom_jit( + void *atom, u64 edit_addr, u64 new_addr) +{ + const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom) + + sizeof(edit_addr) + sizeof(new_addr); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &edit_addr, sizeof(edit_addr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &new_addr, sizeof(new_addr)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_attrib_as_config( + void *as, u64 transtab, u64 memattr, u64 transcfg) +{ + const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(as) + + sizeof(transtab) + sizeof(memattr) + sizeof(transcfg); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &as, sizeof(as)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &transtab, sizeof(transtab)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &memattr, sizeof(memattr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &transcfg, sizeof(transcfg)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_event_lpu_softstop(void *lpu) +{ + const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(lpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &lpu, sizeof(lpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom) +{ + const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX; + const size_t msg_size = + 
sizeof(msg_id) + sizeof(u64) + sizeof(atom); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom) +{ + const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(atom); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &atom, sizeof(atom)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +void __kbase_tlstream_jd_gpu_soft_reset(void *gpu) +{ + const u32 msg_id = KBASE_JD_GPU_SOFT_RESET; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_OBJ, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags); +} + +/*****************************************************************************/ + +void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state) +{ + const u32 msg_id = KBASE_AUX_PM_STATE; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(core_type) + + sizeof(state); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &core_type, sizeof(core_type)); + pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} + +void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change) +{ + const u32 msg_id = KBASE_AUX_PAGEFAULT; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) + + sizeof(page_count_change); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, + &page_count_change, sizeof(page_count_change)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} + +void 
__kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count) +{ + const u32 msg_id = KBASE_AUX_PAGESALLOC; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) + + sizeof(page_count); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr)); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &page_count, sizeof(page_count)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} + +void __kbase_tlstream_aux_devfreq_target(u64 target_freq) +{ + const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(target_freq); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &target_freq, sizeof(target_freq)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} + +void __kbase_tlstream_aux_protected_enter_start(void *gpu) +{ + const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} +void __kbase_tlstream_aux_protected_enter_end(void *gpu) +{ + const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} + +void __kbase_tlstream_aux_protected_leave_start(void *gpu) +{ + const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} +void __kbase_tlstream_aux_protected_leave_end(void *gpu) +{ + const u32 
msg_id = KBASE_AUX_PROTECTED_LEAVE_END; + const size_t msg_size = + sizeof(msg_id) + sizeof(u64) + sizeof(gpu); + unsigned long flags; + char *buffer; + size_t pos = 0; + + buffer = kbasep_tlstream_msgbuf_acquire( + TL_STREAM_TYPE_AUX, + msg_size, &flags); + KBASE_DEBUG_ASSERT(buffer); + + pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id)); + pos = kbasep_tlstream_write_timestamp(buffer, pos); + pos = kbasep_tlstream_write_bytes( + buffer, pos, &gpu, sizeof(gpu)); + KBASE_DEBUG_ASSERT(msg_size == pos); + + kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.h b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h new file mode 100644 index 00000000000000..c0a1117d5f25c1 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h @@ -0,0 +1,623 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#if !defined(_KBASE_TLSTREAM_H) +#define _KBASE_TLSTREAM_H + +#include + +/*****************************************************************************/ + +/** + * kbase_tlstream_init - initialize timeline infrastructure in kernel + * Return: zero on success, negative number on error + */ +int kbase_tlstream_init(void); + +/** + * kbase_tlstream_term - terminate timeline infrastructure in kernel + * + * The timeline must have been previously enabled with kbase_tlstream_init(). + */ +void kbase_tlstream_term(void); + +/** + * kbase_tlstream_acquire - acquire timeline stream file descriptor + * @kctx: kernel common context + * @flags: timeline stream flags + * + * This descriptor is meant to be used by userspace timeline to gain access to + * kernel timeline stream. This stream is later broadcast by user space to the + * timeline client. + * Only one entity can own the descriptor at any given time. The descriptor shall be + * closed if unused. If the descriptor cannot be obtained (i.e. when it is already + * being used) a negative value is returned. + * + * Return: file descriptor on success, negative number on error + */ +int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags); + +/** + * kbase_tlstream_flush_streams - flush timeline streams. + * + * Function will flush pending data in all timeline streams. + */ +void kbase_tlstream_flush_streams(void); + +/** + * kbase_tlstream_reset_body_streams - reset timeline body streams. + * + * Function will discard pending data in all timeline body streams. + */ +void kbase_tlstream_reset_body_streams(void); + +#if MALI_UNIT_TEST +/** + * kbase_tlstream_test - start timeline stream data generator + * @tpw_count: number of trace point writers in each context + * @msg_delay: time delay in milliseconds between trace points written by one + * writer + * @msg_count: number of trace points written by one writer + * @aux_msg: if non-zero aux messages will be included + * + * This test starts a requested number of asynchronous writers in both IRQ and + * thread context.
Each writer will generate required number of test + * tracepoints (tracepoints with embedded information about writer that + * should be verified by user space reader). Tracepoints will be emitted in + * all timeline body streams. If aux_msg is non-zero writer will also + * generate not testable tracepoints (tracepoints without information about + * writer). These tracepoints are used to check correctness of remaining + * timeline message generating functions. Writer will wait requested time + * between generating another set of messages. This call blocks until all + * writers finish. + */ +void kbase_tlstream_test( + unsigned int tpw_count, + unsigned int msg_delay, + unsigned int msg_count, + int aux_msg); + +/** + * kbase_tlstream_stats - read timeline stream statistics + * @bytes_collected: will hold number of bytes read by the user + * @bytes_generated: will hold number of bytes generated by trace points + */ +void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated); +#endif /* MALI_UNIT_TEST */ + +/*****************************************************************************/ + +#define TL_ATOM_STATE_IDLE 0 +#define TL_ATOM_STATE_READY 1 +#define TL_ATOM_STATE_DONE 2 +#define TL_ATOM_STATE_POSTED 3 + +void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid); +void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count); +void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn); +void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu); +void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr); +void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu); +void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid); +void __kbase_tlstream_tl_new_atom(void *atom, u32 nr); +void __kbase_tlstream_tl_del_ctx(void *context); +void __kbase_tlstream_tl_del_atom(void *atom); +void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu); +void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context); +void __kbase_tlstream_tl_ret_atom_lpu( + void *atom, void *lpu, const char *attrib_match_list); +void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu); +void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context); +void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu); +void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx); +void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx); +void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as); +void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as); +void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2); +void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2); +void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2); +void __kbase_tlstream_tl_attrib_atom_config( + void *atom, u64 jd, u64 affinity, u32 config); +void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio); +void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state); +void __kbase_tlstream_tl_attrib_atom_priority_change(void *atom); +void __kbase_tlstream_tl_attrib_atom_jit( + void *atom, u64 edit_addr, u64 new_addr); +void __kbase_tlstream_tl_attrib_as_config( + void *as, u64 transtab, u64 memattr, u64 transcfg); +void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom); +void __kbase_tlstream_tl_event_lpu_softstop(void *lpu); +void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom); +void __kbase_tlstream_jd_gpu_soft_reset(void *gpu); +void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state); +void 
__kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change); +void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count); +void __kbase_tlstream_aux_devfreq_target(u64 target_freq); +void __kbase_tlstream_aux_protected_enter_start(void *gpu); +void __kbase_tlstream_aux_protected_enter_end(void *gpu); +void __kbase_tlstream_aux_protected_leave_start(void *gpu); +void __kbase_tlstream_aux_protected_leave_end(void *gpu); + +#define TLSTREAM_ENABLED (1 << 31) + +extern atomic_t kbase_tlstream_enabled; + +#define __TRACE_IF_ENABLED(trace_name, ...) \ + do { \ + int enabled = atomic_read(&kbase_tlstream_enabled); \ + if (enabled & TLSTREAM_ENABLED) \ + __kbase_tlstream_##trace_name(__VA_ARGS__); \ + } while (0) + +#define __TRACE_IF_ENABLED_LATENCY(trace_name, ...) \ + do { \ + int enabled = atomic_read(&kbase_tlstream_enabled); \ + if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \ + __kbase_tlstream_##trace_name(__VA_ARGS__); \ + } while (0) + +#define __TRACE_IF_ENABLED_JD(trace_name, ...) \ + do { \ + int enabled = atomic_read(&kbase_tlstream_enabled); \ + if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \ + __kbase_tlstream_##trace_name(__VA_ARGS__); \ + } while (0) + +/*****************************************************************************/ + +/** + * KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX - create context object in timeline + * summary + * @context: name of the context object + * @nr: context number + * @tgid: thread Group Id + * + * Function emits a timeline message informing about context creation. Context + * is created with context number (its attribute), that can be used to link + * kbase context with userspace context. + * This message is directed to timeline summary stream. + */ +#define KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(context, nr, tgid) \ + __TRACE_IF_ENABLED(tl_summary_new_ctx, context, nr, tgid) + +/** + * KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU - create GPU object in timeline summary + * @gpu: name of the GPU object + * @id: id value of this GPU + * @core_count: number of cores this GPU hosts + * + * Function emits a timeline message informing about GPU creation. GPU is + * created with two attributes: id and core count. + * This message is directed to timeline summary stream. + */ +#define KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(gpu, id, core_count) \ + __TRACE_IF_ENABLED(tl_summary_new_gpu, gpu, id, core_count) + +/** + * KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU - create LPU object in timeline summary + * @lpu: name of the Logical Processing Unit object + * @nr: sequential number assigned to this LPU + * @fn: property describing this LPU's functional abilities + * + * Function emits a timeline message informing about LPU creation. LPU is + * created with two attributes: number linking this LPU with GPU's job slot + * and function bearing information about this LPU abilities. + * This message is directed to timeline summary stream. + */ +#define KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, nr, fn) \ + __TRACE_IF_ENABLED(tl_summary_new_lpu, lpu, nr, fn) + +/** + * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU - lifelink LPU object to GPU + * @lpu: name of the Logical Processing Unit object + * @gpu: name of the GPU object + * + * Function emits a timeline message informing that LPU object shall be deleted + * along with GPU object. + * This message is directed to timeline summary stream. 
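+ *
+ * A minimal usage sketch (gpu, lpu, lpu_fn, gpu_id, core_count and nr_lpus
+ * are illustrative placeholders, not names defined by this interface): the
+ * GPU object, its LPUs and their lifelinks are normally emitted together
+ * while the summary stream is built:
+ *
+ *   KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(gpu, gpu_id, core_count);
+ *   for (i = 0; i < nr_lpus; i++) {
+ *           KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(&lpu[i], i, lpu_fn[i]);
+ *           KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(&lpu[i], gpu);
+ *   }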
+ */ +#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, gpu) \ + __TRACE_IF_ENABLED(tl_summary_lifelink_lpu_gpu, lpu, gpu) + +/** + * KBASE_TLSTREAM_TL_SUMMARY_NEW_AS - create address space object in timeline summary + * @as: name of the address space object + * @nr: sequential number assigned to this address space + * + * Function emits a timeline message informing about address space creation. + * Address space is created with one attribute: number identifying this + * address space. + * This message is directed to timeline summary stream. + */ +#define KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(as, nr) \ + __TRACE_IF_ENABLED(tl_summary_new_as, as, nr) + +/** + * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU - lifelink address space object to GPU + * @as: name of the address space object + * @gpu: name of the GPU object + * + * Function emits a timeline message informing that address space object + * shall be deleted along with GPU object. + * This message is directed to timeline summary stream. + */ +#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(as, gpu) \ + __TRACE_IF_ENABLED(tl_summary_lifelink_as_gpu, as, gpu) + +/** + * KBASE_TLSTREAM_TL_NEW_CTX - create context object in timeline + * @context: name of the context object + * @nr: context number + * @tgid: thread Group Id + * + * Function emits a timeline message informing about context creation. Context + * is created with context number (its attribute), that can be used to link + * kbase context with userspace context. + */ +#define KBASE_TLSTREAM_TL_NEW_CTX(context, nr, tgid) \ + __TRACE_IF_ENABLED(tl_new_ctx, context, nr, tgid) + +/** + * KBASE_TLSTREAM_TL_NEW_ATOM - create atom object in timeline + * @atom: name of the atom object + * @nr: sequential number assigned to this atom + * + * Function emits a timeline message informing about atom creation. Atom is + * created with atom number (its attribute) that links it with actual work + * bucket id understood by hardware. + */ +#define KBASE_TLSTREAM_TL_NEW_ATOM(atom, nr) \ + __TRACE_IF_ENABLED(tl_new_atom, atom, nr) + +/** + * KBASE_TLSTREAM_TL_DEL_CTX - destroy context object in timeline + * @context: name of the context object + * + * Function emits a timeline message informing that context object ceased to + * exist. + */ +#define KBASE_TLSTREAM_TL_DEL_CTX(context) \ + __TRACE_IF_ENABLED(tl_del_ctx, context) + +/** + * KBASE_TLSTREAM_TL_DEL_ATOM - destroy atom object in timeline + * @atom: name of the atom object + * + * Function emits a timeline message informing that atom object ceased to + * exist. + */ +#define KBASE_TLSTREAM_TL_DEL_ATOM(atom) \ + __TRACE_IF_ENABLED(tl_del_atom, atom) + +/** + * KBASE_TLSTREAM_TL_RET_CTX_LPU - retain context by LPU + * @context: name of the context object + * @lpu: name of the Logical Processing Unit object + * + * Function emits a timeline message informing that context is being held + * by LPU and must not be deleted unless it is released. + */ +#define KBASE_TLSTREAM_TL_RET_CTX_LPU(context, lpu) \ + __TRACE_IF_ENABLED(tl_ret_ctx_lpu, context, lpu) + +/** + * KBASE_TLSTREAM_TL_RET_ATOM_CTX - retain atom by context + * @atom: name of the atom object + * @context: name of the context object + * + * Function emits a timeline message informing that atom object is being held + * by context and must not be deleted unless it is released. 
+ */ +#define KBASE_TLSTREAM_TL_RET_ATOM_CTX(atom, context) \ + __TRACE_IF_ENABLED(tl_ret_atom_ctx, atom, context) + +/** + * KBASE_TLSTREAM_TL_RET_ATOM_LPU - retain atom by LPU + * @atom: name of the atom object + * @lpu: name of the Logical Processing Unit object + * @attrib_match_list: list containing match operator attributes + * + * Function emits a timeline message informing that atom object is being held + * by LPU and must not be deleted unless it is released. + */ +#define KBASE_TLSTREAM_TL_RET_ATOM_LPU(atom, lpu, attrib_match_list) \ + __TRACE_IF_ENABLED(tl_ret_atom_lpu, atom, lpu, attrib_match_list) + +/** + * KBASE_TLSTREAM_TL_NRET_CTX_LPU - release context by LPU + * @context: name of the context object + * @lpu: name of the Logical Processing Unit object + * + * Function emits a timeline message informing that context is being released + * by LPU object. + */ +#define KBASE_TLSTREAM_TL_NRET_CTX_LPU(context, lpu) \ + __TRACE_IF_ENABLED(tl_nret_ctx_lpu, context, lpu) + +/** + * KBASE_TLSTREAM_TL_NRET_ATOM_CTX - release atom by context + * @atom: name of the atom object + * @context: name of the context object + * + * Function emits a timeline message informing that atom object is being + * released by context. + */ +#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX(atom, context) \ + __TRACE_IF_ENABLED(tl_nret_atom_ctx, atom, context) + +/** + * KBASE_TLSTREAM_TL_NRET_ATOM_LPU - release atom by LPU + * @atom: name of the atom object + * @lpu: name of the Logical Processing Unit object + * + * Function emits a timeline message informing that atom object is being + * released by LPU. + */ +#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU(atom, lpu) \ + __TRACE_IF_ENABLED(tl_nret_atom_lpu, atom, lpu) + +/** + * KBASE_TLSTREAM_TL_RET_AS_CTX - lifelink address space object to context + * @as: name of the address space object + * @ctx: name of the context object + * + * Function emits a timeline message informing that address space object + * is being held by the context object. + */ +#define KBASE_TLSTREAM_TL_RET_AS_CTX(as, ctx) \ + __TRACE_IF_ENABLED(tl_ret_as_ctx, as, ctx) + +/** + * KBASE_TLSTREAM_TL_NRET_AS_CTX - release address space by context + * @as: name of the address space object + * @ctx: name of the context object + * + * Function emits a timeline message informing that address space object + * is being released by atom. + */ +#define KBASE_TLSTREAM_TL_NRET_AS_CTX(as, ctx) \ + __TRACE_IF_ENABLED(tl_nret_as_ctx, as, ctx) + +/** + * KBASE_TLSTREAM_TL_RET_ATOM_AS - retain atom by address space + * @atom: name of the atom object + * @as: name of the address space object + * + * Function emits a timeline message informing that atom object is being held + * by address space and must not be deleted unless it is released. + */ +#define KBASE_TLSTREAM_TL_RET_ATOM_AS(atom, as) \ + __TRACE_IF_ENABLED(tl_ret_atom_as, atom, as) + +/** + * KBASE_TLSTREAM_TL_NRET_ATOM_AS - release atom by address space + * @atom: name of the atom object + * @as: name of the address space object + * + * Function emits a timeline message informing that atom object is being + * released by address space. 
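+ *
+ * Retain/release tracepoints are expected to be emitted in balanced pairs.
+ * A minimal sketch of the usual pattern (katom and as are illustrative
+ * placeholders):
+ *
+ *   KBASE_TLSTREAM_TL_RET_ATOM_AS(katom, as);
+ *   ... the atom executes using this address space ...
+ *   KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, as);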
+ */ +#define KBASE_TLSTREAM_TL_NRET_ATOM_AS(atom, as) \ + __TRACE_IF_ENABLED(tl_nret_atom_as, atom, as) + +/** + * KBASE_TLSTREAM_TL_DEP_ATOM_ATOM - parent atom depends on child atom + * @atom1: name of the child atom object + * @atom2: name of the parent atom object that depends on child atom + * + * Function emits a timeline message informing that parent atom waits for + * child atom object to be completed before start its execution. + */ +#define KBASE_TLSTREAM_TL_DEP_ATOM_ATOM(atom1, atom2) \ + __TRACE_IF_ENABLED(tl_dep_atom_atom, atom1, atom2) + +/** + * KBASE_TLSTREAM_TL_NDEP_ATOM_ATOM - dependency between atoms resolved + * @atom1: name of the child atom object + * @atom2: name of the parent atom object that depended on child atom + * + * Function emits a timeline message informing that parent atom execution + * dependency on child atom has been resolved. + */ +#define KBASE_TLSTREAM_TL_NDEP_ATOM_ATOM(atom1, atom2) \ + __TRACE_IF_ENABLED(tl_ndep_atom_atom, atom1, atom2) + +/** + * KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM - information about already resolved dependency between atoms + * @atom1: name of the child atom object + * @atom2: name of the parent atom object that depended on child atom + * + * Function emits a timeline message informing that parent atom execution + * dependency on child atom has been resolved. + */ +#define KBASE_TLSTREAM_TL_RDEP_ATOM_ATOM(atom1, atom2) \ + __TRACE_IF_ENABLED(tl_rdep_atom_atom, atom1, atom2) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes + * @atom: name of the atom object + * @jd: job descriptor address + * @affinity: job affinity + * @config: job config + * + * Function emits a timeline message containing atom attributes. + */ +#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(atom, jd, affinity, config) \ + __TRACE_IF_ENABLED(tl_attrib_atom_config, atom, jd, affinity, config) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority + * @atom: name of the atom object + * @prio: atom priority + * + * Function emits a timeline message containing atom priority. + */ +#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(atom, prio) \ + __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority, atom, prio) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state + * @atom: name of the atom object + * @state: atom state + * + * Function emits a timeline message containing atom state. + */ +#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, state) \ + __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_state, atom, state) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE - atom caused priority change + * @atom: name of the atom object + * + * Function emits a timeline message signalling priority change + */ +#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY_CHANGE(atom) \ + __TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority_change, atom) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit happened on atom + * @atom: atom identifier + * @edit_addr: address edited by jit + * @new_addr: address placed into the edited location + */ +#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(atom, edit_addr, new_addr) \ + __TRACE_IF_ENABLED_JD(tl_attrib_atom_jit, atom, edit_addr, new_addr) + +/** + * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes + * @as: assigned address space + * @transtab: configuration of the TRANSTAB register + * @memattr: configuration of the MEMATTR register + * @transcfg: configuration of the TRANSCFG register (or zero if not present) + * + * Function emits a timeline message containing address space attributes. 
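+ *
+ * A minimal sketch of one plausible emission point, once the address space
+ * registers have been programmed (as, transtab, memattr and transcfg below
+ * are illustrative placeholders for the values written to the hardware):
+ *
+ *   KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as, transtab, memattr, transcfg);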
+ */ +#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as, transtab, memattr, transcfg) \ + __TRACE_IF_ENABLED(tl_attrib_as_config, as, transtab, memattr, transcfg) + +/** + * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ex + * @atom: atom identifier + */ +#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(atom) \ + __TRACE_IF_ENABLED(tl_event_atom_softstop_ex, atom) + +/** + * KBASE_TLSTREAM_TL_EVENT_LPU_softstop + * @lpu: name of the LPU object + */ +#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(lpu) \ + __TRACE_IF_ENABLED(tl_event_lpu_softstop, lpu) + +/** + * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_issue + * @atom: atom identifier + */ +#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(atom) \ + __TRACE_IF_ENABLED(tl_event_atom_softstop_issue, atom) + +/** + * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - The GPU is being soft reset + * @gpu: name of the GPU object + * + * This imperative tracepoint is specific to job dumping. + * Function emits a timeline message indicating GPU soft reset. + */ +#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET(gpu) \ + __TRACE_IF_ENABLED(jd_gpu_soft_reset, gpu) + + +/** + * KBASE_TLSTREAM_AUX_PM_STATE - timeline message: power management state + * @core_type: core type (shader, tiler, l2 cache, l3 cache) + * @state: 64bits bitmask reporting power state of the cores (1-ON, 0-OFF) + */ +#define KBASE_TLSTREAM_AUX_PM_STATE(core_type, state) \ + __TRACE_IF_ENABLED(aux_pm_state, core_type, state) + +/** + * KBASE_TLSTREAM_AUX_PAGEFAULT - timeline message: MMU page fault event + * resulting in new pages being mapped + * @ctx_nr: kernel context number + * @page_count_change: number of pages to be added + */ +#define KBASE_TLSTREAM_AUX_PAGEFAULT(ctx_nr, page_count_change) \ + __TRACE_IF_ENABLED(aux_pagefault, ctx_nr, page_count_change) + +/** + * KBASE_TLSTREAM_AUX_PAGESALLOC - timeline message: total number of allocated + * pages is changed + * @ctx_nr: kernel context number + * @page_count: number of pages used by the context + */ +#define KBASE_TLSTREAM_AUX_PAGESALLOC(ctx_nr, page_count) \ + __TRACE_IF_ENABLED(aux_pagesalloc, ctx_nr, page_count) + +/** + * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - timeline message: new target DVFS + * frequency + * @target_freq: new target frequency + */ +#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(target_freq) \ + __TRACE_IF_ENABLED(aux_devfreq_target, target_freq) + +/** + * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - The GPU has started transitioning + * to protected mode + * @gpu: name of the GPU object + * + * Function emits a timeline message indicating the GPU is starting to + * transition to protected mode. + */ +#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(gpu) \ + __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_start, gpu) + +/** + * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - The GPU has finished transitioning + * to protected mode + * @gpu: name of the GPU object + * + * Function emits a timeline message indicating the GPU has finished + * transitioning to protected mode. + */ +#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(gpu) \ + __TRACE_IF_ENABLED_LATENCY(aux_protected_enter_end, gpu) + +/** + * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - The GPU has started transitioning + * to non-protected mode + * @gpu: name of the GPU object + * + * Function emits a timeline message indicating the GPU is starting to + * transition to non-protected mode. 
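+ *
+ * As with the enter tracepoints, the START/END pair is meant to bracket the
+ * actual transition. A minimal sketch of the expected call pattern (gpu is an
+ * illustrative placeholder):
+ *
+ *   KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(gpu);
+ *   ... hardware leaves protected mode ...
+ *   KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(gpu);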
+ */ +#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(gpu) \ + __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_start, gpu) + +/** + * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - The GPU has finished transitioning + * to non-protected mode + * @gpu: name of the GPU object + * + * Function emits a timeline message indicating the GPU has finished + * transitioning to non-protected mode. + */ +#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(gpu) \ + __TRACE_IF_ENABLED_LATENCY(aux_protected_leave_end, gpu) + +#endif /* _KBASE_TLSTREAM_H */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h new file mode 100644 index 00000000000000..e2e0544208cea1 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h @@ -0,0 +1,264 @@ +/* + * + * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE ***** + * ***** DO NOT INCLUDE DIRECTLY ***** + * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ + +/* + * The purpose of this header file is just to contain a list of trace code identifiers + * + * Each identifier is wrapped in a macro, so that its string form and enum form can be created + * + * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block. + * + * This allows automatic creation of an enum and a corresponding array of strings + * + * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE. + * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ * + * e.g.: + * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X + * typedef enum + * { + * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X ) + * #include "mali_kbase_trace_defs.h" + * #undef KBASE_TRACE_CODE_MAKE_CODE + * } kbase_trace_code; + * + * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY OTHER PURPOSE OTHER THAN THE ABOVE + * + * + * The use of the macro here is: + * - KBASE_TRACE_CODE_MAKE_CODE( X ) + * + * Which produces: + * - For an enum, KBASE_TRACE_CODE_X + * - For a string, "X" + * + * + * For example: + * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to: + * - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum + * - "JM_JOB_COMPLETE" for the string + * - To use it to trace an event, do: + * - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val ); + */ + +#if 0 /* Dummy section to avoid breaking formatting */ +int dummy_array[] = { +#endif + +/* + * Core events + */ + /* no info_val, no gpu_addr, no atom */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY), + /* no info_val, no gpu_addr, no atom */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM), + /* info_val == GPU_IRQ_STATUS register */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ), + /* info_val == bits cleared */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR), + /* info_val == GPU_IRQ_STATUS register */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE), + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET), + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET), + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR), + /* GPU addr==dump address */ + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE), + KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES), +/* + * Job Slot management events + */ + /* info_val==irq rawstat at start */ + KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ), + /* info_val==jobs processed */ + KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END), +/* In the following: + * + * - ctx is set if a corresponding job found (NULL otherwise, e.g. 
some soft-stop cases) + * - uatom==kernel-side mapped uatom address (for correlation with user-side) + */ + /* info_val==exit code; gpu_addr==chain gpuaddr */ + KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE), + /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT), + /* gpu_addr is as follows: + * - If JS_STATUS active after soft-stop, val==gpu addr written to + * JS_HEAD on submit + * - otherwise gpu_addr==0 */ + KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP), + KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0), + KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1), + /* gpu_addr==JS_HEAD read */ + KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP), + /* gpu_addr==JS_HEAD read */ + KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0), + /* gpu_addr==JS_HEAD read */ + KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1), + /* gpu_addr==JS_TAIL read */ + KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD), +/* gpu_addr is as follows: + * - If JS_STATUS active before soft-stop, val==JS_HEAD + * - otherwise gpu_addr==0 + */ + /* gpu_addr==JS_HEAD read */ + KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD), + KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS), + KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE), + /* info_val == is_scheduled */ + KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED), + /* info_val == is_scheduled */ + KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED), + KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE), + /* info_val == nr jobs submitted */ + KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP), + /* gpu_addr==JS_HEAD_NEXT last written */ + KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT), + KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET), + KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER), + KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER), +/* + * Job dispatch events + */ + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_DONE), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB), + /* gpu_addr==0, info_val==0, uatom==0 */ + KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER), +/* + * Scheduler Core events + */ + KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB), + /* gpu_addr==last value written/would be written to JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB), + KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB), + /* gpu_addr==value to write into JS_HEAD */ + KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED), + /* kctx is the one being evicted, info_val == kctx to put in */ + KBASE_TRACE_CODE_MAKE_CODE(JS_FAST_START_EVICTS_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED), + /* info_val == lower 32 bits of affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT), + /* info_val == lower 32 bits of affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED), + /* info_val == lower 32 bits of affinity */ + 
KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED), + /* info_val == lower 32 bits of rechecked affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED), + /* info_val == lower 32 bits of rechecked affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED), + /* info_val == lower 32 bits of affinity */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE), + /* info_val == the ctx attribute now on ctx */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX), + /* info_val == the ctx attribute now on runpool */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL), + /* info_val == the ctx attribute now off ctx */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX), + /* info_val == the ctx attribute now off runpool */ + KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL), +/* + * Scheduler Policy events + */ + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX), + /* info_val == whether it was evicted */ + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ), + /* gpu_addr==JS_HEAD to write if the job were run */ + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START), + KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END), +/* + * Power Management Events + */ + KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP), + KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE), + KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER), + /* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */ + KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED), + KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER), + KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_INUSE), + KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_INUSE), + KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_INUSE), + KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_INUSE), + KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_SHADER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_TILER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED), + KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS), + KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE), + 
KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE), + KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON), + KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF), + /* info_val == policy number, or -1 for "Already changing" */ + KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY), + KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY), + /* info_val == policy number */ + KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT), + /* info_val == policy number */ + KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM), +/* Unused code just to make it easier to not have a comma at the end. + * All other codes MUST come before this */ + KBASE_TRACE_CODE_MAKE_CODE(DUMMY) + +#if 0 /* Dummy section to avoid breaking formatting */ +}; +#endif + +/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c new file mode 100644 index 00000000000000..5830e87f0818b6 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c @@ -0,0 +1,236 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include +#include +#include + +#define CREATE_TRACE_POINTS + +#ifdef CONFIG_MALI_TRACE_TIMELINE +#include "mali_timeline.h" + +#include +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active); + +struct kbase_trace_timeline_desc { + char *enum_str; + char *desc; + char *format; + char *format_desc; +}; + +static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = { + #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc } + #include "mali_kbase_trace_timeline_defs.h" + #undef KBASE_TIMELINE_TRACE_CODE +}; + +#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table) + +static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= KBASE_NR_TRACE_CODES) + return NULL; + + return &kbase_trace_timeline_desc_table[*pos]; +} + +static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data) +{ +} + +static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos) +{ + (*pos)++; + + if (*pos == KBASE_NR_TRACE_CODES) + return NULL; + + return &kbase_trace_timeline_desc_table[*pos]; +} + +static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data) +{ + struct kbase_trace_timeline_desc *trace_desc = data; + + seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc); + return 0; +} + + +static const struct 
seq_operations kbasep_trace_timeline_seq_ops = { + .start = kbasep_trace_timeline_seq_start, + .next = kbasep_trace_timeline_seq_next, + .stop = kbasep_trace_timeline_seq_stop, + .show = kbasep_trace_timeline_seq_show, +}; + +static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &kbasep_trace_timeline_seq_ops); +} + +static const struct file_operations kbasep_trace_timeline_debugfs_fops = { + .open = kbasep_trace_timeline_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +#ifdef CONFIG_DEBUG_FS + +void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev) +{ + debugfs_create_file("mali_timeline_defs", + S_IRUGO, kbdev->mali_debugfs_directory, NULL, + &kbasep_trace_timeline_debugfs_fops); +} + +#endif /* CONFIG_DEBUG_FS */ + +void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (kbdev->timeline.slot_atoms_submitted[js] > 0) { + KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1); + } else { + base_atom_id atom_number = kbase_jd_atom_id(kctx, katom); + + KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1); + KBASE_TIMELINE_JOB_START(kctx, js, atom_number); + } + ++kbdev->timeline.slot_atoms_submitted[js]; + + KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]); +} + +void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js, + kbasep_js_atom_done_code done_code) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + + if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) { + KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0); + } else { + /* Job finished in JS_HEAD */ + base_atom_id atom_number = kbase_jd_atom_id(kctx, katom); + + KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0); + KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number); + + /* see if we need to trace the job in JS_NEXT moving to JS_HEAD */ + if (kbase_backend_nr_atoms_submitted(kbdev, js)) { + struct kbase_jd_atom *next_katom; + struct kbase_context *next_kctx; + + /* Peek the next atom - note that the atom in JS_HEAD will already + * have been dequeued */ + next_katom = kbase_backend_inspect_head(kbdev, js); + WARN_ON(!next_katom); + next_kctx = next_katom->kctx; + KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0); + KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1); + KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom)); + } + } + + --kbdev->timeline.slot_atoms_submitted[js]; + + KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]); +} + +void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent) +{ + int uid = 0; + int old_uid; + + /* If a producer already exists for the event, try to use their UID (multiple-producers) */ + uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]); + old_uid = uid; + + /* Get a new non-zero UID if we don't have one yet */ + while (!uid) + uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter); + + /* Try to use this UID */ + if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid)) + /* If it changed, raced with another producer: we've lost this UID */ + uid = 0; + + KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid); +} + +void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) +{ + int uid = 
atomic_read(&kbdev->timeline.pm_event_uid[event]); + + if (uid != 0) { + if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0)) + /* If it changed, raced with another consumer: we've lost this UID */ + uid = 0; + + KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid); + } +} + +void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) +{ + int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]); + + if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0)) + /* If it changed, raced with another consumer: we've lost this UID */ + uid = 0; + + KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid); +} + +void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + /* Simply log the start of the transition */ + kbdev->timeline.l2_transitioning = true; + KBASE_TIMELINE_POWERING_L2(kbdev); +} + +void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); + /* Simply log the end of the transition */ + if (kbdev->timeline.l2_transitioning) { + kbdev->timeline.l2_transitioning = false; + KBASE_TIMELINE_POWERED_L2(kbdev); + } +} + +#endif /* CONFIG_MALI_TRACE_TIMELINE */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h new file mode 100644 index 00000000000000..df47170f9dad89 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h @@ -0,0 +1,453 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#if !defined(_KBASE_TRACE_TIMELINE_H) +#define _KBASE_TRACE_TIMELINE_H + +#ifdef CONFIG_MALI_TRACE_TIMELINE + +enum kbase_trace_timeline_code { + #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val + #include "mali_kbase_trace_timeline_defs.h" + #undef KBASE_TIMELINE_TRACE_CODE +}; + +#ifdef CONFIG_DEBUG_FS + +/** Initialize Timeline DebugFS entries */ +void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev); + +#else /* CONFIG_DEBUG_FS */ + +#define kbasep_trace_timeline_debugfs_init CSTD_NOP + +#endif /* CONFIG_DEBUG_FS */ + +/* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE + * functions. 
+ * Output is timestamped by either sched_clock() (default), local_clock(), or + * cpu_clock(), depending on /sys/kernel/debug/tracing/trace_clock */ +#include "mali_timeline.h" + +/* Trace number of atoms in flight for kctx (atoms either not completed, or in + process of being returned to user */ +#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \ + (int)kctx->timeline.owner_tgid, \ + count); \ + } while (0) + +/* Trace atom_id being Ready to Run */ +#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \ + CTX_FLOW_ATOM_READY, \ + (int)kctx->timeline.owner_tgid, \ + atom_id); \ + } while (0) + +/* Trace number of atoms submitted to job slot js + * + * NOTE: This uses a different tracepoint to the head/next/soft-stop actions, + * so that those actions can be filtered out separately from this + * + * This is because this is more useful, as we can use it to calculate general + * utilization easily and accurately */ +#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_SLOT_ACTIVE, \ + (int)kctx->timeline.owner_tgid, \ + js, count); \ + } while (0) + + +/* Trace atoms present in JS_NEXT */ +#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_SLOT_NEXT, \ + (int)kctx->timeline.owner_tgid, \ + js, count); \ + } while (0) + +/* Trace atoms present in JS_HEAD */ +#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_SLOT_HEAD, \ + (int)kctx->timeline.owner_tgid, \ + js, count); \ + } while (0) + +/* Trace that a soft stop/evict from next is being attempted on a slot */ +#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_SLOT_STOPPING, \ + (kctx) ? 
(int)kctx->timeline.owner_tgid : 0, \ + js, count); \ + } while (0) + + + +/* Trace state of overall GPU power */ +#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_POWER_ACTIVE, active); \ + } while (0) + +/* Trace state of tiler power */ +#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_POWER_TILER_ACTIVE, \ + hweight64(bitmap)); \ + } while (0) + +/* Trace number of shaders currently powered */ +#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_POWER_SHADER_ACTIVE, \ + hweight64(bitmap)); \ + } while (0) + +/* Trace state of L2 power */ +#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_SET_GPU_POWER_L2_ACTIVE, \ + hweight64(bitmap)); \ + } while (0) + +/* Trace state of L2 cache*/ +#define KBASE_TIMELINE_POWERING_L2(kbdev) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_FLOW_GPU_POWER_L2_POWERING, \ + 1); \ + } while (0) + +#define KBASE_TIMELINE_POWERED_L2(kbdev) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \ + SW_FLOW_GPU_POWER_L2_ACTIVE, \ + 1); \ + } while (0) + +/* Trace kbase_pm_send_event message send */ +#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \ + SW_FLOW_PM_SEND_EVENT, \ + event_type, pm_event_id); \ + } while (0) + +/* Trace kbase_pm_worker message receive */ +#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \ + SW_FLOW_PM_HANDLE_EVENT, \ + event_type, pm_event_id); \ + } while (0) + + +/* Trace atom_id starting in JS_HEAD */ +#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ 
+#endif + trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \ + HW_START_GPU_JOB_CHAIN_SW_APPROX, \ + (int)kctx->timeline.owner_tgid, \ + js, _consumerof_atom_number); \ + } while (0) + +/* Trace atom_id stopping on JS_HEAD */ +#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \ + HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \ + (int)kctx->timeline.owner_tgid, \ + js, _producerof_atom_number_completed); \ + } while (0) + +/** Trace beginning/end of a call to kbase_pm_check_transitions_nolock from a + * certin caller */ +#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \ + trace_code, 1); \ + } while (0) + +/* Trace number of contexts active */ +#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \ + do { \ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; \ + ktime_get_raw_ts64(&ts); \ +#else + struct timespec ts; \ + getrawmonotonic(&ts); \ +#endif + trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \ + count); \ + } while (0) + +/* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */ + +/** + * Trace that an atom is starting on a job slot + * + * The caller must be holding hwaccess_lock + */ +void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js); + +/** + * Trace that an atom has done on a job slot + * + * 'Done' in this sense can occur either because: + * - the atom in JS_HEAD finished + * - the atom in JS_NEXT was evicted + * + * Whether the atom finished or was evicted is passed in @a done_code + * + * It is assumed that the atom has already been removed from the submit slot, + * with either: + * - kbasep_jm_dequeue_submit_slot() + * - kbasep_jm_dequeue_tail_submit_slot() + * + * The caller must be holding hwaccess_lock + */ +void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js, + kbasep_js_atom_done_code done_code); + + +/** Trace a pm event starting */ +void kbase_timeline_pm_send_event(struct kbase_device *kbdev, + enum kbase_timeline_pm_event event_sent); + +/** Trace a pm event finishing */ +void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event); + +/** Check whether a pm event was present, and if so trace finishing it */ +void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event); + +/** Trace L2 power-up start */ +void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev); + +/** Trace L2 power-up done */ +void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev); + +#else + +#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) CSTD_NOP() + +#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) CSTD_NOP() + +#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) CSTD_NOP() + +#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) CSTD_NOP() + +#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) CSTD_NOP() + +#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) CSTD_NOP() + 
+#define KBASE_TIMELINE_GPU_POWER(kbdev, active) CSTD_NOP() + +#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) CSTD_NOP() + +#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) CSTD_NOP() + +#define KBASE_TIMELINE_POWER_L2(kbdev, active) CSTD_NOP() + +#define KBASE_TIMELINE_POWERING_L2(kbdev) CSTD_NOP() + +#define KBASE_TIMELINE_POWERED_L2(kbdev) CSTD_NOP() + +#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP() + +#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP() + +#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) CSTD_NOP() + +#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) CSTD_NOP() + +#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP() + +#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP() + +static inline void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); +} + +static inline void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx, + struct kbase_jd_atom *katom, int js, + kbasep_js_atom_done_code done_code) +{ + lockdep_assert_held(&kbdev->hwaccess_lock); +} + +static inline void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent) +{ +} + +static inline void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) +{ +} + +static inline void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event) +{ +} + +static inline void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev) +{ +} + +static inline void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev) +{ +} +#endif /* CONFIG_MALI_TRACE_TIMELINE */ + +#endif /* _KBASE_TRACE_TIMELINE_H */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h new file mode 100644 index 00000000000000..156a95a67f4aef --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h @@ -0,0 +1,140 @@ +/* + * + * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE ***** + * ***** DO NOT INCLUDE DIRECTLY ***** + * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */ + +/* + * Conventions on Event Names: + * + * - The prefix determines something about how the timeline should be + * displayed, and is split up into various parts, separated by underscores: + * - 'SW' and 'HW' as the first part will be used to determine whether a + * timeline is to do with Software or Hardware - effectively, separate + * 'channels' for Software and Hardware + * - 'START', 'STOP', 'ENTER', 'LEAVE' can be used in the second part, and + * signify related pairs of events - these are optional. 
+ * - 'FLOW' indicates a generic event, which can use dependencies + * - This gives events such as: + * - 'SW_ENTER_FOO' + * - 'SW_LEAVE_FOO' + * - 'SW_FLOW_BAR_1' + * - 'SW_FLOW_BAR_2' + * - 'HW_START_BAZ' + * - 'HW_STOP_BAZ' + * - And an unadorned HW event: + * - 'HW_BAZ_FROZBOZ' + */ + +/* + * Conventions on parameter names: + * - anything with 'instance' in the name will have a separate timeline based + * on that instances. + * - underscored-prefixed parameters will by hidden by default on timelines + * + * Hence: + * - Different job slots have their own 'instance', based on the instance value + * - Per-context info (e.g. atoms on a context) have their own 'instance' + * (i.e. each context should be on a different timeline) + * + * Note that globally-shared resources can be tagged with a tgid, but we don't + * want an instance per context: + * - There's no point having separate Job Slot timelines for each context, that + * would be confusing - there's only really 3 job slots! + * - There's no point having separate Shader-powered timelines for each + * context, that would be confusing - all shader cores (whether it be 4, 8, + * etc) are shared in the system. + */ + + /* + * CTX events + */ + /* Separate timelines for each context 'instance'*/ + KBASE_TIMELINE_TRACE_CODE(CTX_SET_NR_ATOMS_IN_FLIGHT, "CTX: Atoms in flight", "%d,%d", "_instance_tgid,_value_number_of_atoms"), + KBASE_TIMELINE_TRACE_CODE(CTX_FLOW_ATOM_READY, "CTX: Atoms Ready to Run", "%d,%d,%d", "_instance_tgid,_consumerof_atom_number,_producerof_atom_number_ready"), + + /* + * SW Events + */ + /* Separate timelines for each slot 'instance' */ + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_ACTIVE, "SW: GPU slot active", "%d,%d,%d", "_tgid,_instance_slot,_value_number_of_atoms"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_NEXT, "SW: GPU atom in NEXT", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_next"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_HEAD, "SW: GPU atom in HEAD", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_head"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_STOPPING, "SW: Try Soft-Stop on GPU slot", "%d,%d,%d", "_tgid,_instance_slot,_value_is_slot_stopping"), + /* Shader and overall power is shared - can't have separate instances of + * it, just tagging with the context */ + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_ACTIVE, "SW: GPU power active", "%d,%d", "_tgid,_value_is_power_active"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_TILER_ACTIVE, "SW: GPU tiler powered", "%d,%d", "_tgid,_value_number_of_tilers"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_SHADER_ACTIVE, "SW: GPU shaders powered", "%d,%d", "_tgid,_value_number_of_shaders"), + KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powered", "%d,%d", "_tgid,_value_number_of_l2"), + + /* SW Power event messaging. 
_event_type is one from the kbase_pm_event enum */ + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_SEND_EVENT, "SW: PM Send Event", "%d,%d,%d", "_tgid,_event_type,_writerof_pm_event_id"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_HANDLE_EVENT, "SW: PM Handle Event", "%d,%d,%d", "_tgid,_event_type,_finalconsumerof_pm_event_id"), + /* SW L2 power events */ + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING, "SW: GPU L2 powering", "%d,%d", "_tgid,_writerof_l2_transitioning"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powering done", "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"), + + KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE, "SW: Context Active", "%d,%d", "_tgid,_value_active"), + + /* + * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock() + */ + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweroff"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweroff"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweron"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweron"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_writerof_pm_checktrans_gpu_interrupt"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_gpu_interrupt"), + + /* + * Significant Indirect callers of kbase_pm_check_transitions_nolock() + */ + /* kbase_pm_request_cores */ + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader_tiler"), + /* kbase_pm_release_cores */ + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader"), + 
KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader_tiler"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_shader_poweroff_callback"), + KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_shader_poweroff_callback"), + /* + * END: SW Functions that call kbase_pm_check_transitions_nolock() + */ + + /* + * HW Events + */ + KBASE_TIMELINE_TRACE_CODE(HW_MMU_FAULT, +"HW: MMU Fault", "%d,%d,%d", "_tgid,fault_type,fault_stage,asid"), + KBASE_TIMELINE_TRACE_CODE(HW_START_GPU_JOB_CHAIN_SW_APPROX, +"HW: Job Chain start (SW approximated)", "%d,%d,%d", +"_tgid,job_slot,_consumerof_atom_number_ready"), + KBASE_TIMELINE_TRACE_CODE(HW_STOP_GPU_JOB_CHAIN_SW_APPROX, +"HW: Job Chain stop (SW approximated)", "%d,%d,%d", +"_tgid,job_slot,_producerof_atom_number_completed") diff --git a/drivers/gpu/arm/midgard/mali_kbase_uku.h b/drivers/gpu/arm/midgard/mali_kbase_uku.h new file mode 100644 index 00000000000000..f0477cc918db22 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_uku.h @@ -0,0 +1,489 @@ +/* + * + * (C) COPYRIGHT 2008-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
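A note on mali_kbase_trace_timeline_defs.h above: it is an X-macro fragment, which is why the intentional lack of header guards is called out at its top; each consumer defines KBASE_TIMELINE_TRACE_CODE() before including it and the entries expand in place. The consumer is not part of this hunk; a typical expansion into an enum (assumed here for illustration only) looks like:

#define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val
enum kbase_trace_timeline_code {
#include "mali_kbase_trace_timeline_defs.h"
};
#undef KBASE_TIMELINE_TRACE_CODE

Re-including the file with a different definition of the macro builds the matching table of description and format strings from the same list.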
+ * + */ + + + + + +#ifndef _KBASE_UKU_H_ +#define _KBASE_UKU_H_ + +#include "mali_uk.h" +#include "mali_base_kernel.h" + +/* This file needs to support being included from kernel and userside (which use different defines) */ +#if defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON +#define SUPPORT_MALI_ERROR_INJECT +#endif /* defined(CONFIG_MALI_ERROR_INJECT) || MALI_ERROR_INJECT_ON */ +#if defined(CONFIG_MALI_NO_MALI) +#define SUPPORT_MALI_NO_MALI +#elif defined(MALI_NO_MALI) +#if MALI_NO_MALI +#define SUPPORT_MALI_NO_MALI +#endif +#endif + +#if defined(SUPPORT_MALI_NO_MALI) || defined(SUPPORT_MALI_ERROR_INJECT) +#include "backend/gpu/mali_kbase_model_dummy.h" +#endif + +#include "mali_kbase_gpuprops_types.h" + +/* + * 10.1: + * - Do mmap in kernel for SAME_VA memory allocations rather then + * calling back into the kernel as a 2nd stage of the allocation request. + * + * 10.2: + * - Add KBASE_FUNC_MEM_JIT_INIT which allows clients to request a custom VA + * region for use with JIT (ignored on 32-bit platforms) + * + * 10.3: + * - base_jd_core_req typedef-ed to u32 (instead of to u16) + * - two flags added: BASE_JD_REQ_SKIP_CACHE_STAT / _END + * + * 10.4: + * - Removed KBASE_FUNC_EXT_BUFFER_LOCK used only in internal tests + * + * 10.5: + * - Reverted to performing mmap in user space so that tools like valgrind work. + * + * 10.6: + * - Add flags input variable to KBASE_FUNC_TLSTREAM_ACQUIRE + */ + +#define LINUX_UK_BASE_MAGIC 0x80 + +struct kbase_uk_mem_alloc { + union uk_header header; + /* IN */ + u64 va_pages; + u64 commit_pages; + u64 extent; + /* IN/OUT */ + u64 flags; + /* OUT */ + u64 gpu_va; + u16 va_alignment; + u8 padding[6]; +}; + +struct kbase_uk_mem_free { + union uk_header header; + /* IN */ + u64 gpu_addr; + /* OUT */ +}; + +struct kbase_uk_mem_alias { + union uk_header header; + /* IN/OUT */ + u64 flags; + /* IN */ + u64 stride; + u64 nents; + u64 ai; + /* OUT */ + u64 gpu_va; + u64 va_pages; +}; + +struct kbase_uk_mem_import { + union uk_header header; + /* IN */ + u64 phandle; + u32 type; + u32 padding; + /* IN/OUT */ + u64 flags; + /* OUT */ + u64 gpu_va; + u64 va_pages; +}; + +struct kbase_uk_mem_flags_change { + union uk_header header; + /* IN */ + u64 gpu_va; + u64 flags; + u64 mask; +}; + +struct kbase_uk_job_submit { + union uk_header header; + /* IN */ + u64 addr; + u32 nr_atoms; + u32 stride; /* bytes between atoms, i.e. 
sizeof(base_jd_atom_v2) */ + /* OUT */ +}; + +struct kbase_uk_post_term { + union uk_header header; +}; + +struct kbase_uk_sync_now { + union uk_header header; + + /* IN */ + struct base_syncset sset; + + /* OUT */ +}; + +struct kbase_uk_hwcnt_dump { + union uk_header header; +}; + +struct kbase_uk_hwcnt_clear { + union uk_header header; +}; + +struct kbase_uk_fence_validate { + union uk_header header; + /* IN */ + s32 fd; + u32 padding; + /* OUT */ +}; + +struct kbase_uk_stream_create { + union uk_header header; + /* IN */ + char name[32]; + /* OUT */ + s32 fd; + u32 padding; +}; + +struct kbase_uk_gpuprops { + union uk_header header; + + /* IN */ + struct mali_base_gpu_props props; + /* OUT */ +}; + +struct kbase_uk_mem_query { + union uk_header header; + /* IN */ + u64 gpu_addr; +#define KBASE_MEM_QUERY_COMMIT_SIZE 1 +#define KBASE_MEM_QUERY_VA_SIZE 2 +#define KBASE_MEM_QUERY_FLAGS 3 + u64 query; + /* OUT */ + u64 value; +}; + +struct kbase_uk_mem_commit { + union uk_header header; + /* IN */ + u64 gpu_addr; + u64 pages; + /* OUT */ + u32 result_subcode; + u32 padding; +}; + +struct kbase_uk_find_cpu_offset { + union uk_header header; + /* IN */ + u64 gpu_addr; + u64 cpu_addr; + u64 size; + /* OUT */ + u64 offset; +}; + +#define KBASE_GET_VERSION_BUFFER_SIZE 64 +struct kbase_uk_get_ddk_version { + union uk_header header; + /* OUT */ + char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE]; + u32 version_string_size; + u32 padding; +}; + +struct kbase_uk_disjoint_query { + union uk_header header; + /* OUT */ + u32 counter; + u32 padding; +}; + +struct kbase_uk_set_flags { + union uk_header header; + /* IN */ + u32 create_flags; + u32 padding; +}; + +#if MALI_UNIT_TEST +#define TEST_ADDR_COUNT 4 +#define KBASE_TEST_BUFFER_SIZE 128 +struct kbase_exported_test_data { + u64 test_addr[TEST_ADDR_COUNT]; /**< memory address */ + u32 test_addr_pages[TEST_ADDR_COUNT]; /**< memory size in pages */ + u64 kctx; /**< base context created by process */ + u64 mm; /**< pointer to process address space */ + u8 buffer1[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */ + u8 buffer2[KBASE_TEST_BUFFER_SIZE]; /**< unit test defined parameter */ +}; + +struct kbase_uk_set_test_data { + union uk_header header; + /* IN */ + struct kbase_exported_test_data test_data; +}; + +#endif /* MALI_UNIT_TEST */ + +#ifdef SUPPORT_MALI_ERROR_INJECT +struct kbase_uk_error_params { + union uk_header header; + /* IN */ + struct kbase_error_params params; +}; +#endif /* SUPPORT_MALI_ERROR_INJECT */ + +#ifdef SUPPORT_MALI_NO_MALI +struct kbase_uk_model_control_params { + union uk_header header; + /* IN */ + struct kbase_model_control_params params; +}; +#endif /* SUPPORT_MALI_NO_MALI */ + +struct kbase_uk_profiling_controls { + union uk_header header; + u32 profiling_controls[FBDUMP_CONTROL_MAX]; +}; + +struct kbase_uk_debugfs_mem_profile_add { + union uk_header header; + u32 len; + u32 padding; + u64 buf; +}; + +struct kbase_uk_context_id { + union uk_header header; + /* OUT */ + int id; +}; + +/** + * struct kbase_uk_tlstream_acquire - User/Kernel space data exchange structure + * @header: UK structure header + * @flags: timeline stream flags + * @fd: timeline stream file descriptor + * + * This structure is used when performing a call to acquire kernel side timeline + * stream file descriptor. 
+ */ +struct kbase_uk_tlstream_acquire { + union uk_header header; + /* IN */ + u32 flags; + /* OUT */ + s32 fd; +}; + +/** + * struct kbase_uk_tlstream_acquire_v10_4 - User/Kernel space data exchange + * structure + * @header: UK structure header + * @fd: timeline stream file descriptor + * + * This structure is used when performing a call to acquire kernel side timeline + * stream file descriptor. + */ +struct kbase_uk_tlstream_acquire_v10_4 { + union uk_header header; + /* IN */ + /* OUT */ + s32 fd; +}; + +/** + * struct kbase_uk_tlstream_flush - User/Kernel space data exchange structure + * @header: UK structure header + * + * This structure is used when performing a call to flush kernel side + * timeline streams. + */ +struct kbase_uk_tlstream_flush { + union uk_header header; + /* IN */ + /* OUT */ +}; + +#if MALI_UNIT_TEST +/** + * struct kbase_uk_tlstream_test - User/Kernel space data exchange structure + * @header: UK structure header + * @tpw_count: number of trace point writers in each context + * @msg_delay: time delay between tracepoints from one writer in milliseconds + * @msg_count: number of trace points written by one writer + * @aux_msg: if non-zero aux messages will be included + * + * This structure is used when performing a call to start timeline stream test + * embedded in kernel. + */ +struct kbase_uk_tlstream_test { + union uk_header header; + /* IN */ + u32 tpw_count; + u32 msg_delay; + u32 msg_count; + u32 aux_msg; + /* OUT */ +}; + +/** + * struct kbase_uk_tlstream_stats - User/Kernel space data exchange structure + * @header: UK structure header + * @bytes_collected: number of bytes read by user + * @bytes_generated: number of bytes generated by tracepoints + * + * This structure is used when performing a call to obtain timeline stream + * statistics. + */ +struct kbase_uk_tlstream_stats { + union uk_header header; /**< UK structure header. */ + /* IN */ + /* OUT */ + u32 bytes_collected; + u32 bytes_generated; +}; +#endif /* MALI_UNIT_TEST */ + +/** + * struct struct kbase_uk_prfcnt_value for the KBASE_FUNC_SET_PRFCNT_VALUES ioctl + * @header: UK structure header + * @data: Counter samples for the dummy model + * @size:............Size of the counter sample data + */ +struct kbase_uk_prfcnt_values { + union uk_header header; + /* IN */ + u32 *data; + u32 size; +}; + +/** + * struct kbase_uk_soft_event_update - User/Kernel space data exchange structure + * @header: UK structure header + * @evt: the GPU address containing the event + * @new_status: the new event status, must be either BASE_JD_SOFT_EVENT_SET or + * BASE_JD_SOFT_EVENT_RESET + * @flags: reserved for future uses, must be set to 0 + * + * This structure is used to update the status of a software event. If the + * event's status is set to BASE_JD_SOFT_EVENT_SET, any job currently waiting + * on this event will complete. + */ +struct kbase_uk_soft_event_update { + union uk_header header; + /* IN */ + u64 evt; + u32 new_status; + u32 flags; +}; + +/** + * struct kbase_uk_mem_jit_init - User/Kernel space data exchange structure + * @header: UK structure header + * @va_pages: Number of virtual pages required for JIT + * + * This structure is used when requesting initialization of JIT. 
+ */ +struct kbase_uk_mem_jit_init { + union uk_header header; + /* IN */ + u64 va_pages; +}; + +enum kbase_uk_function_id { + KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0), + KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1), + KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2), + KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3), + KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4), + KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5), + KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6), + + /* UK_FUNC_ID + 7 not in use since BASE_LEGACY_UK6_SUPPORT dropped */ + + KBASE_FUNC_SYNC = (UK_FUNC_ID + 8), + + KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9), + + KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10), + KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11), + KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12), + + KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14), + + KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15), + + KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16), + KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18), + + KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19), + KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20), + KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21), + + /* UK_FUNC_ID + 22 not in use since BASE_LEGACY_UK8_SUPPORT dropped */ + + KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23), + KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24), + KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25), + KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26), + /* to be used only for testing + * purposes, otherwise these controls + * are set through gator API */ + + KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27), + KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28), + KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29), + + KBASE_FUNC_GET_CONTEXT_ID = (UK_FUNC_ID + 31), + + KBASE_FUNC_TLSTREAM_ACQUIRE_V10_4 = (UK_FUNC_ID + 32), +#if MALI_UNIT_TEST + KBASE_FUNC_TLSTREAM_TEST = (UK_FUNC_ID + 33), + KBASE_FUNC_TLSTREAM_STATS = (UK_FUNC_ID + 34), +#endif /* MALI_UNIT_TEST */ + KBASE_FUNC_TLSTREAM_FLUSH = (UK_FUNC_ID + 35), + + KBASE_FUNC_HWCNT_READER_SETUP = (UK_FUNC_ID + 36), + +#ifdef SUPPORT_MALI_NO_MALI + KBASE_FUNC_SET_PRFCNT_VALUES = (UK_FUNC_ID + 37), +#endif + + KBASE_FUNC_SOFT_EVENT_UPDATE = (UK_FUNC_ID + 38), + + KBASE_FUNC_MEM_JIT_INIT = (UK_FUNC_ID + 39), + + KBASE_FUNC_TLSTREAM_ACQUIRE = (UK_FUNC_ID + 40), + + KBASE_FUNC_MAX +}; + +#endif /* _KBASE_UKU_H_ */ + diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.c b/drivers/gpu/arm/midgard/mali_kbase_utility.c new file mode 100644 index 00000000000000..be474ff8740124 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_utility.c @@ -0,0 +1,33 @@ +/* + * + * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +#include + +bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry) +{ + struct list_head *pos = base->next; + + while (pos != base) { + if (pos == entry) + return true; + + pos = pos->next; + } + return false; +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.h b/drivers/gpu/arm/midgard/mali_kbase_utility.h new file mode 100644 index 00000000000000..fd7252dab0de25 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_utility.h @@ -0,0 +1,37 @@ +/* + * + * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#ifndef _KBASE_UTILITY_H +#define _KBASE_UTILITY_H + +#ifndef _KBASE_H_ +#error "Don't include this file directly, use mali_kbase.h instead" +#endif + +/** Test whether the given list entry is a member of the given list. + * + * @param base The head of the list to be tested + * @param entry The list entry to be tested + * + * @return true if entry is a member of base + * false otherwise + */ +bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry); + +#endif /* _KBASE_UTILITY_H */ diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c new file mode 100644 index 00000000000000..eb67b333c1c493 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c @@ -0,0 +1,2081 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MALI_NO_MALI +#include +#endif + +/*****************************************************************************/ + +/* Hwcnt reader API version */ +#define HWCNT_READER_API 1 + +/* The number of nanoseconds in a second. */ +#define NSECS_IN_SEC 1000000000ull /* ns */ + +/* The time resolution of dumping service. */ +#define DUMPING_RESOLUTION 500000ull /* ns */ + +/* The maximal supported number of dumping buffers. */ +#define MAX_BUFFER_COUNT 32 + +/* Size and number of hw counters blocks. 
*/ +#define NR_CNT_BLOCKS_PER_GROUP 8 +#define NR_CNT_PER_BLOCK 64 +#define NR_BYTES_PER_CNT 4 +#define NR_BYTES_PER_HDR 16 +#define PRFCNT_EN_MASK_OFFSET 0x8 + +/*****************************************************************************/ + +enum { + SHADER_HWCNT_BM, + TILER_HWCNT_BM, + MMU_L2_HWCNT_BM, + JM_HWCNT_BM +}; + +enum vinstr_state { + VINSTR_IDLE, + VINSTR_DUMPING, + VINSTR_SUSPENDING, + VINSTR_SUSPENDED, + VINSTR_RESUMING +}; + +/** + * struct kbase_vinstr_context - vinstr context per device + * @lock: protects the entire vinstr context + * @kbdev: pointer to kbase device + * @kctx: pointer to kbase context + * @vmap: vinstr vmap for mapping hwcnt dump buffer + * @gpu_va: GPU hwcnt dump buffer address + * @cpu_va: the CPU side mapping of the hwcnt dump buffer + * @dump_size: size of the dump buffer in bytes + * @bitmap: current set of counters monitored, not always in sync + * with hardware + * @reprogram: when true, reprogram hwcnt block with the new set of + * counters + * @state: vinstr state + * @state_lock: protects information about vinstr state + * @suspend_waitq: notification queue to trigger state re-validation + * @suspend_cnt: reference counter of vinstr's suspend state + * @suspend_work: worker to execute on entering suspended state + * @resume_work: worker to execute on leaving suspended state + * @nclients: number of attached clients, pending or otherwise + * @waiting_clients: head of list of clients being periodically sampled + * @idle_clients: head of list of clients being idle + * @suspended_clients: head of list of clients being suspended + * @thread: periodic sampling thread + * @waitq: notification queue of sampling thread + * @request_pending: request for action for sampling thread + * @clients_present: when true, we have at least one client + * Note: this variable is in sync. with nclients and is + * present to preserve simplicity. Protected by state_lock. 
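Two small notes on the context structure that follows. First, its state_lock field is declared as struct spinlock; that compiles, since spinlock_t is a typedef of struct spinlock, but the conventional kernel spelling would be:

        spinlock_t state_lock;  /* protects state and clients_present */

Second, judging from the code later in this file, state toggles between VINSTR_IDLE and VINSTR_DUMPING around each counter dump, while the suspend_work/resume_work handlers (not shown in this excerpt) drive the VINSTR_SUSPENDING, VINSTR_SUSPENDED and VINSTR_RESUMING transitions.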
+ */ +struct kbase_vinstr_context { + struct mutex lock; + struct kbase_device *kbdev; + struct kbase_context *kctx; + + struct kbase_vmap_struct vmap; + u64 gpu_va; + void *cpu_va; + size_t dump_size; + u32 bitmap[4]; + bool reprogram; + + enum vinstr_state state; + struct spinlock state_lock; + wait_queue_head_t suspend_waitq; + unsigned int suspend_cnt; + struct work_struct suspend_work; + struct work_struct resume_work; + + u32 nclients; + struct list_head waiting_clients; + struct list_head idle_clients; + struct list_head suspended_clients; + + struct task_struct *thread; + wait_queue_head_t waitq; + atomic_t request_pending; + + bool clients_present; +}; + +/** + * struct kbase_vinstr_client - a vinstr client attached to a vinstr context + * @vinstr_ctx: vinstr context client is attached to + * @list: node used to attach this client to list in vinstr context + * @buffer_count: number of buffers this client is using + * @event_mask: events this client reacts to + * @dump_size: size of one dump buffer in bytes + * @bitmap: bitmap request for JM, TILER, SHADER and MMU counters + * @legacy_buffer: userspace hwcnt dump buffer (legacy interface) + * @kernel_buffer: kernel hwcnt dump buffer (kernel client interface) + * @accum_buffer: temporary accumulation buffer for preserving counters + * @dump_time: next time this clients shall request hwcnt dump + * @dump_interval: interval between periodic hwcnt dumps + * @dump_buffers: kernel hwcnt dump buffers allocated by this client + * @dump_buffers_meta: metadata of dump buffers + * @meta_idx: index of metadata being accessed by userspace + * @read_idx: index of buffer read by userspace + * @write_idx: index of buffer being written by dumping service + * @waitq: client's notification queue + * @pending: when true, client has attached but hwcnt not yet updated + */ +struct kbase_vinstr_client { + struct kbase_vinstr_context *vinstr_ctx; + struct list_head list; + unsigned int buffer_count; + u32 event_mask; + size_t dump_size; + u32 bitmap[4]; + void __user *legacy_buffer; + void *kernel_buffer; + void *accum_buffer; + u64 dump_time; + u32 dump_interval; + char *dump_buffers; + struct kbase_hwcnt_reader_metadata *dump_buffers_meta; + atomic_t meta_idx; + atomic_t read_idx; + atomic_t write_idx; + wait_queue_head_t waitq; + bool pending; +}; + +/** + * struct kbasep_vinstr_wake_up_timer - vinstr service thread wake up timer + * @hrtimer: high resolution timer + * @vinstr_ctx: vinstr context + */ +struct kbasep_vinstr_wake_up_timer { + struct hrtimer hrtimer; + struct kbase_vinstr_context *vinstr_ctx; +}; + +/*****************************************************************************/ + +static int kbasep_vinstr_service_task(void *data); + +static unsigned int kbasep_vinstr_hwcnt_reader_poll( + struct file *filp, + poll_table *wait); +static long kbasep_vinstr_hwcnt_reader_ioctl( + struct file *filp, + unsigned int cmd, + unsigned long arg); +static int kbasep_vinstr_hwcnt_reader_mmap( + struct file *filp, + struct vm_area_struct *vma); +static int kbasep_vinstr_hwcnt_reader_release( + struct inode *inode, + struct file *filp); + +/* The timeline stream file operations structure. 
*/ +static const struct file_operations vinstr_client_fops = { + .poll = kbasep_vinstr_hwcnt_reader_poll, + .unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl, + .compat_ioctl = kbasep_vinstr_hwcnt_reader_ioctl, + .mmap = kbasep_vinstr_hwcnt_reader_mmap, + .release = kbasep_vinstr_hwcnt_reader_release, +}; + +/*****************************************************************************/ + +static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_context *kctx = vinstr_ctx->kctx; + struct kbase_device *kbdev = kctx->kbdev; + struct kbase_uk_hwcnt_setup setup; + int err; + + setup.dump_buffer = vinstr_ctx->gpu_va; + setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM]; + setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM]; + setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM]; + setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM]; + + /* Mark the context as active so the GPU is kept turned on */ + /* A suspend won't happen here, because we're in a syscall from a + * userspace thread. */ + kbase_pm_context_active(kbdev); + + /* Schedule the context in */ + kbasep_js_schedule_privileged_ctx(kbdev, kctx); + err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &setup); + if (err) { + /* Release the context. This had its own Power Manager Active + * reference */ + kbasep_js_release_privileged_ctx(kbdev, kctx); + + /* Also release our Power Manager Active reference */ + kbase_pm_context_idle(kbdev); + } + + return err; +} + +static void disable_hwcnt(struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_context *kctx = vinstr_ctx->kctx; + struct kbase_device *kbdev = kctx->kbdev; + int err; + + err = kbase_instr_hwcnt_disable_internal(kctx); + if (err) { + dev_warn(kbdev->dev, "Failed to disable HW counters (ctx:%p)", + kctx); + return; + } + + /* Release the context. This had its own Power Manager Active reference. */ + kbasep_js_release_privileged_ctx(kbdev, kctx); + + /* Also release our Power Manager Active reference. 
*/ + kbase_pm_context_idle(kbdev); + + dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx); +} + +static int reprogram_hwcnt(struct kbase_vinstr_context *vinstr_ctx) +{ + disable_hwcnt(vinstr_ctx); + return enable_hwcnt(vinstr_ctx); +} + +static void hwcnt_bitmap_set(u32 dst[4], u32 src[4]) +{ + dst[JM_HWCNT_BM] = src[JM_HWCNT_BM]; + dst[TILER_HWCNT_BM] = src[TILER_HWCNT_BM]; + dst[SHADER_HWCNT_BM] = src[SHADER_HWCNT_BM]; + dst[MMU_L2_HWCNT_BM] = src[MMU_L2_HWCNT_BM]; +} + +static void hwcnt_bitmap_union(u32 dst[4], u32 src[4]) +{ + dst[JM_HWCNT_BM] |= src[JM_HWCNT_BM]; + dst[TILER_HWCNT_BM] |= src[TILER_HWCNT_BM]; + dst[SHADER_HWCNT_BM] |= src[SHADER_HWCNT_BM]; + dst[MMU_L2_HWCNT_BM] |= src[MMU_L2_HWCNT_BM]; +} + +size_t kbase_vinstr_dump_size(struct kbase_device *kbdev) +{ + size_t dump_size; + +#ifndef CONFIG_MALI_NO_MALI + if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) { + u32 nr_cg; + + nr_cg = kbdev->gpu_props.num_core_groups; + dump_size = nr_cg * NR_CNT_BLOCKS_PER_GROUP * + NR_CNT_PER_BLOCK * + NR_BYTES_PER_CNT; + } else +#endif /* CONFIG_MALI_NO_MALI */ + { + /* assume v5 for now */ + base_gpu_props *props = &kbdev->gpu_props.props; + u32 nr_l2 = props->l2_props.num_l2_slices; + u64 core_mask = props->coherency_info.group[0].core_mask; + u32 nr_blocks = fls64(core_mask); + + /* JM and tiler counter blocks are always present */ + dump_size = (2 + nr_l2 + nr_blocks) * + NR_CNT_PER_BLOCK * + NR_BYTES_PER_CNT; + } + return dump_size; +} +KBASE_EXPORT_TEST_API(kbase_vinstr_dump_size); + +static size_t kbasep_vinstr_dump_size_ctx( + struct kbase_vinstr_context *vinstr_ctx) +{ + return kbase_vinstr_dump_size(vinstr_ctx->kctx->kbdev); +} + +static int kbasep_vinstr_map_kernel_dump_buffer( + struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_va_region *reg; + struct kbase_context *kctx = vinstr_ctx->kctx; + u64 flags, nr_pages; + + flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR; + vinstr_ctx->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx); + nr_pages = PFN_UP(vinstr_ctx->dump_size); + + reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags, + &vinstr_ctx->gpu_va); + if (!reg) + return -ENOMEM; + + vinstr_ctx->cpu_va = kbase_vmap( + kctx, + vinstr_ctx->gpu_va, + vinstr_ctx->dump_size, + &vinstr_ctx->vmap); + if (!vinstr_ctx->cpu_va) { + kbase_mem_free(kctx, vinstr_ctx->gpu_va); + return -ENOMEM; + } + + return 0; +} + +static void kbasep_vinstr_unmap_kernel_dump_buffer( + struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_context *kctx = vinstr_ctx->kctx; + + kbase_vunmap(kctx, &vinstr_ctx->vmap); + kbase_mem_free(kctx, vinstr_ctx->gpu_va); +} + +/** + * kbasep_vinstr_create_kctx - create kernel context for vinstr + * @vinstr_ctx: vinstr context + * Return: zero on success + */ +static int kbasep_vinstr_create_kctx(struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_device *kbdev = vinstr_ctx->kbdev; + struct kbasep_kctx_list_element *element = NULL; + unsigned long flags; + bool enable_backend = false; + int err; + + vinstr_ctx->kctx = kbase_create_context(vinstr_ctx->kbdev, true); + if (!vinstr_ctx->kctx) + return -ENOMEM; + + /* Map the master kernel dump buffer. The HW dumps the counters + * into this memory region. */ + err = kbasep_vinstr_map_kernel_dump_buffer(vinstr_ctx); + if (err) + goto failed_map; + + /* Add kernel context to list of contexts associated with device. 
*/ + element = kzalloc(sizeof(*element), GFP_KERNEL); + if (element) { + element->kctx = vinstr_ctx->kctx; + mutex_lock(&kbdev->kctx_list_lock); + list_add(&element->link, &kbdev->kctx_list); + + /* Inform timeline client about new context. + * Do this while holding the lock to avoid tracepoint + * being created in both body and summary stream. */ + KBASE_TLSTREAM_TL_NEW_CTX( + vinstr_ctx->kctx, + vinstr_ctx->kctx->id, + (u32)(vinstr_ctx->kctx->tgid)); + + mutex_unlock(&kbdev->kctx_list_lock); + } else { + /* Don't treat this as a fail - just warn about it. */ + dev_warn(kbdev->dev, + "couldn't add kctx to kctx_list\n"); + } + + /* Don't enable hardware counters if vinstr is suspended. + * Note that vinstr resume code is run under vinstr context lock, + * lower layer will be enabled as needed on resume. */ + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + if (VINSTR_IDLE == vinstr_ctx->state) + enable_backend = true; + vinstr_ctx->clients_present = true; + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + if (enable_backend) + err = enable_hwcnt(vinstr_ctx); + if (err) + goto failed_enable; + + vinstr_ctx->thread = kthread_run( + kbasep_vinstr_service_task, + vinstr_ctx, + "mali_vinstr_service"); + if (IS_ERR(vinstr_ctx->thread)) { + err = PTR_ERR(vinstr_ctx->thread); + goto failed_kthread; + } + + return 0; + +failed_kthread: + disable_hwcnt(vinstr_ctx); +failed_enable: + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + vinstr_ctx->clients_present = false; + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx); + if (element) { + mutex_lock(&kbdev->kctx_list_lock); + list_del(&element->link); + kfree(element); + mutex_unlock(&kbdev->kctx_list_lock); + KBASE_TLSTREAM_TL_DEL_CTX(vinstr_ctx->kctx); + } +failed_map: + kbase_destroy_context(vinstr_ctx->kctx); + vinstr_ctx->kctx = NULL; + return err; +} + +/** + * kbasep_vinstr_destroy_kctx - destroy vinstr's kernel context + * @vinstr_ctx: vinstr context + */ +static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_device *kbdev = vinstr_ctx->kbdev; + struct kbasep_kctx_list_element *element; + struct kbasep_kctx_list_element *tmp; + bool found = false; + unsigned long flags; + + /* Release hw counters dumping resources. */ + vinstr_ctx->thread = NULL; + disable_hwcnt(vinstr_ctx); + kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx); + + /* Simplify state transitions by specifying that we have no clients. */ + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + vinstr_ctx->clients_present = false; + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + + /* Remove kernel context from the device's contexts list. */ + mutex_lock(&kbdev->kctx_list_lock); + list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) { + if (element->kctx == vinstr_ctx->kctx) { + list_del(&element->link); + kfree(element); + found = true; + } + } + mutex_unlock(&kbdev->kctx_list_lock); + + if (!found) + dev_warn(kbdev->dev, "kctx not in kctx_list\n"); + + /* Destroy context. */ + kbase_destroy_context(vinstr_ctx->kctx); + + /* Inform timeline client about context destruction. 
*/ + KBASE_TLSTREAM_TL_DEL_CTX(vinstr_ctx->kctx); + + vinstr_ctx->kctx = NULL; +} + +/** + * kbasep_vinstr_attach_client - Attach a client to the vinstr core + * @vinstr_ctx: vinstr context + * @buffer_count: requested number of dump buffers + * @bitmap: bitmaps describing which counters should be enabled + * @argp: pointer where notification descriptor shall be stored + * @kernel_buffer: pointer to kernel side buffer + * + * Return: vinstr opaque client handle or NULL on failure + */ +static struct kbase_vinstr_client *kbasep_vinstr_attach_client( + struct kbase_vinstr_context *vinstr_ctx, u32 buffer_count, + u32 bitmap[4], void *argp, void *kernel_buffer) +{ + struct task_struct *thread = NULL; + struct kbase_vinstr_client *cli; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + if (buffer_count > MAX_BUFFER_COUNT + || (buffer_count & (buffer_count - 1))) + return NULL; + + cli = kzalloc(sizeof(*cli), GFP_KERNEL); + if (!cli) + return NULL; + + cli->vinstr_ctx = vinstr_ctx; + cli->buffer_count = buffer_count; + cli->event_mask = + (1 << BASE_HWCNT_READER_EVENT_MANUAL) | + (1 << BASE_HWCNT_READER_EVENT_PERIODIC); + cli->pending = true; + + hwcnt_bitmap_set(cli->bitmap, bitmap); + + mutex_lock(&vinstr_ctx->lock); + + hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap); + vinstr_ctx->reprogram = true; + + /* If this is the first client, create the vinstr kbase + * context. This context is permanently resident until the + * last client exits. */ + if (!vinstr_ctx->nclients) { + hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap); + if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0) + goto error; + + vinstr_ctx->reprogram = false; + cli->pending = false; + } + + /* The GPU resets the counter block every time there is a request + * to dump it. We need a per client kernel buffer for accumulating + * the counters. */ + cli->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx); + cli->accum_buffer = kzalloc(cli->dump_size, GFP_KERNEL); + if (!cli->accum_buffer) + goto error; + + /* Prepare buffers. */ + if (cli->buffer_count) { + int *fd = (int *)argp; + size_t tmp; + + /* Allocate area for buffers metadata storage. */ + tmp = sizeof(struct kbase_hwcnt_reader_metadata) * + cli->buffer_count; + cli->dump_buffers_meta = kmalloc(tmp, GFP_KERNEL); + if (!cli->dump_buffers_meta) + goto error; + + /* Allocate required number of dumping buffers. */ + cli->dump_buffers = (char *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(cli->dump_size * cli->buffer_count)); + if (!cli->dump_buffers) + goto error; + + /* Create descriptor for user-kernel data exchange. */ + *fd = anon_inode_getfd( + "[mali_vinstr_desc]", + &vinstr_client_fops, + cli, + O_RDONLY | O_CLOEXEC); + if (0 > *fd) + goto error; + } else if (kernel_buffer) { + cli->kernel_buffer = kernel_buffer; + } else { + cli->legacy_buffer = (void __user *)argp; + } + + atomic_set(&cli->read_idx, 0); + atomic_set(&cli->meta_idx, 0); + atomic_set(&cli->write_idx, 0); + init_waitqueue_head(&cli->waitq); + + vinstr_ctx->nclients++; + list_add(&cli->list, &vinstr_ctx->idle_clients); + + mutex_unlock(&vinstr_ctx->lock); + + return cli; + +error: + kfree(cli->dump_buffers_meta); + if (cli->dump_buffers) + free_pages( + (unsigned long)cli->dump_buffers, + get_order(cli->dump_size * cli->buffer_count)); + kfree(cli->accum_buffer); + if (!vinstr_ctx->nclients && vinstr_ctx->kctx) { + thread = vinstr_ctx->thread; + kbasep_vinstr_destroy_kctx(vinstr_ctx); + } + kfree(cli); + + mutex_unlock(&vinstr_ctx->lock); + + /* Thread must be stopped after lock is released. 
*/ + if (thread) + kthread_stop(thread); + + return NULL; +} + +void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli) +{ + struct kbase_vinstr_context *vinstr_ctx; + struct kbase_vinstr_client *iter, *tmp; + struct task_struct *thread = NULL; + u32 zerobitmap[4] = { 0 }; + int cli_found = 0; + + KBASE_DEBUG_ASSERT(cli); + vinstr_ctx = cli->vinstr_ctx; + KBASE_DEBUG_ASSERT(vinstr_ctx); + + mutex_lock(&vinstr_ctx->lock); + + list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) { + if (iter == cli) { + vinstr_ctx->reprogram = true; + cli_found = 1; + list_del(&iter->list); + break; + } + } + if (!cli_found) { + list_for_each_entry_safe( + iter, tmp, &vinstr_ctx->waiting_clients, list) { + if (iter == cli) { + vinstr_ctx->reprogram = true; + cli_found = 1; + list_del(&iter->list); + break; + } + } + } + KBASE_DEBUG_ASSERT(cli_found); + + kfree(cli->dump_buffers_meta); + free_pages( + (unsigned long)cli->dump_buffers, + get_order(cli->dump_size * cli->buffer_count)); + kfree(cli->accum_buffer); + kfree(cli); + + vinstr_ctx->nclients--; + if (!vinstr_ctx->nclients) { + thread = vinstr_ctx->thread; + kbasep_vinstr_destroy_kctx(vinstr_ctx); + } + + /* Rebuild context bitmap now that the client has detached */ + hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap); + list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) + hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap); + list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) + hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap); + + mutex_unlock(&vinstr_ctx->lock); + + /* Thread must be stopped after lock is released. */ + if (thread) + kthread_stop(thread); +} +KBASE_EXPORT_TEST_API(kbase_vinstr_detach_client); + +/* Accumulate counters in the dump buffer */ +static void accum_dump_buffer(void *dst, void *src, size_t dump_size) +{ + size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT; + u32 *d = dst; + u32 *s = src; + size_t i, j; + + for (i = 0; i < dump_size; i += block_size) { + /* skip over the header block */ + d += NR_BYTES_PER_HDR / sizeof(u32); + s += NR_BYTES_PER_HDR / sizeof(u32); + for (j = 0; j < (block_size - NR_BYTES_PER_HDR) / sizeof(u32); j++) { + /* saturate result if addition would result in wraparound */ + if (U32_MAX - *d < *s) + *d = U32_MAX; + else + *d += *s; + d++; + s++; + } + } +} + +/* This is the Midgard v4 patch function. It copies the headers for each + * of the defined blocks from the master kernel buffer and then patches up + * the performance counter enable mask for each of the blocks to exclude + * counters that were not requested by the client. 
*/ +static void patch_dump_buffer_hdr_v4( + struct kbase_vinstr_context *vinstr_ctx, + struct kbase_vinstr_client *cli) +{ + u32 *mask; + u8 *dst = cli->accum_buffer; + u8 *src = vinstr_ctx->cpu_va; + u32 nr_cg = vinstr_ctx->kctx->kbdev->gpu_props.num_core_groups; + size_t i, group_size, group; + enum { + SC0_BASE = 0 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + SC1_BASE = 1 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + SC2_BASE = 2 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + SC3_BASE = 3 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + TILER_BASE = 4 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + MMU_L2_BASE = 5 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT, + JM_BASE = 7 * NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT + }; + + group_size = NR_CNT_BLOCKS_PER_GROUP * + NR_CNT_PER_BLOCK * + NR_BYTES_PER_CNT; + for (i = 0; i < nr_cg; i++) { + group = i * group_size; + /* copy shader core headers */ + memcpy(&dst[group + SC0_BASE], &src[group + SC0_BASE], + NR_BYTES_PER_HDR); + memcpy(&dst[group + SC1_BASE], &src[group + SC1_BASE], + NR_BYTES_PER_HDR); + memcpy(&dst[group + SC2_BASE], &src[group + SC2_BASE], + NR_BYTES_PER_HDR); + memcpy(&dst[group + SC3_BASE], &src[group + SC3_BASE], + NR_BYTES_PER_HDR); + + /* copy tiler header */ + memcpy(&dst[group + TILER_BASE], &src[group + TILER_BASE], + NR_BYTES_PER_HDR); + + /* copy mmu header */ + memcpy(&dst[group + MMU_L2_BASE], &src[group + MMU_L2_BASE], + NR_BYTES_PER_HDR); + + /* copy job manager header */ + memcpy(&dst[group + JM_BASE], &src[group + JM_BASE], + NR_BYTES_PER_HDR); + + /* patch the shader core enable mask */ + mask = (u32 *)&dst[group + SC0_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[SHADER_HWCNT_BM]; + mask = (u32 *)&dst[group + SC1_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[SHADER_HWCNT_BM]; + mask = (u32 *)&dst[group + SC2_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[SHADER_HWCNT_BM]; + mask = (u32 *)&dst[group + SC3_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[SHADER_HWCNT_BM]; + + /* patch the tiler core enable mask */ + mask = (u32 *)&dst[group + TILER_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[TILER_HWCNT_BM]; + + /* patch the mmu core enable mask */ + mask = (u32 *)&dst[group + MMU_L2_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[MMU_L2_HWCNT_BM]; + + /* patch the job manager enable mask */ + mask = (u32 *)&dst[group + JM_BASE + PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[JM_HWCNT_BM]; + } +} + +/* This is the Midgard v5 patch function. It copies the headers for each + * of the defined blocks from the master kernel buffer and then patches up + * the performance counter enable mask for each of the blocks to exclude + * counters that were not requested by the client. 
*/ +static void patch_dump_buffer_hdr_v5( + struct kbase_vinstr_context *vinstr_ctx, + struct kbase_vinstr_client *cli) +{ + struct kbase_device *kbdev = vinstr_ctx->kctx->kbdev; + u32 i, nr_l2; + u64 core_mask; + u32 *mask; + u8 *dst = cli->accum_buffer; + u8 *src = vinstr_ctx->cpu_va; + size_t block_size = NR_CNT_PER_BLOCK * NR_BYTES_PER_CNT; + + /* copy and patch job manager header */ + memcpy(dst, src, NR_BYTES_PER_HDR); + mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[JM_HWCNT_BM]; + dst += block_size; + src += block_size; + + /* copy and patch tiler header */ + memcpy(dst, src, NR_BYTES_PER_HDR); + mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[TILER_HWCNT_BM]; + dst += block_size; + src += block_size; + + /* copy and patch MMU/L2C headers */ + nr_l2 = kbdev->gpu_props.props.l2_props.num_l2_slices; + for (i = 0; i < nr_l2; i++) { + memcpy(dst, src, NR_BYTES_PER_HDR); + mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[MMU_L2_HWCNT_BM]; + dst += block_size; + src += block_size; + } + + /* copy and patch shader core headers */ + core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask; + while (0ull != core_mask) { + memcpy(dst, src, NR_BYTES_PER_HDR); + if (0ull != (core_mask & 1ull)) { + /* if block is not reserved update header */ + mask = (u32 *)&dst[PRFCNT_EN_MASK_OFFSET]; + *mask &= cli->bitmap[SHADER_HWCNT_BM]; + } + dst += block_size; + src += block_size; + + core_mask >>= 1; + } +} + +/** + * accum_clients - accumulate dumped hw counters for all known clients + * @vinstr_ctx: vinstr context + */ +static void accum_clients(struct kbase_vinstr_context *vinstr_ctx) +{ + struct kbase_vinstr_client *iter; + int v4 = 0; + +#ifndef CONFIG_MALI_NO_MALI + v4 = kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4); +#endif + + list_for_each_entry(iter, &vinstr_ctx->idle_clients, list) { + /* Don't bother accumulating clients whose hwcnt requests + * have not yet been honoured. */ + if (iter->pending) + continue; + if (v4) + patch_dump_buffer_hdr_v4(vinstr_ctx, iter); + else + patch_dump_buffer_hdr_v5(vinstr_ctx, iter); + accum_dump_buffer( + iter->accum_buffer, + vinstr_ctx->cpu_va, + iter->dump_size); + } + list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list) { + /* Don't bother accumulating clients whose hwcnt requests + * have not yet been honoured. */ + if (iter->pending) + continue; + if (v4) + patch_dump_buffer_hdr_v4(vinstr_ctx, iter); + else + patch_dump_buffer_hdr_v5(vinstr_ctx, iter); + accum_dump_buffer( + iter->accum_buffer, + vinstr_ctx->cpu_va, + iter->dump_size); + } +} + +/*****************************************************************************/ + +/** + * kbasep_vinstr_get_timestamp - return timestamp + * + * Function returns timestamp value based on raw monotonic timer. Value will + * wrap around zero in case of overflow. 
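The timestamp helper below open-codes the timespec64/timespec split with a version check. If every kernel this driver targets provides ktime_get_raw_ns(), which predates the 5.6 cut-over used elsewhere in this patch by a long way, the same nanosecond value can be produced without any conditional; this is a sketch under that assumption, not a required change:

static u64 kbasep_vinstr_get_timestamp(void)
{
        /* raw monotonic clock, already scaled to nanoseconds */
        return ktime_get_raw_ns();
}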
+ * + * Return: timestamp value + */ +static u64 kbasep_vinstr_get_timestamp(void) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 ts; + + ktime_get_raw_ts64(&ts); +#else + struct timespec ts; + + getrawmonotonic(&ts); +#endif + return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec; +} + +/** + * kbasep_vinstr_add_dump_request - register client's dumping request + * @cli: requesting client + * @waiting_clients: list of pending dumping requests + */ +static void kbasep_vinstr_add_dump_request( + struct kbase_vinstr_client *cli, + struct list_head *waiting_clients) +{ + struct kbase_vinstr_client *tmp; + + if (list_empty(waiting_clients)) { + list_add(&cli->list, waiting_clients); + return; + } + list_for_each_entry(tmp, waiting_clients, list) { + if (tmp->dump_time > cli->dump_time) { + list_add_tail(&cli->list, &tmp->list); + return; + } + } + list_add_tail(&cli->list, waiting_clients); +} + +/** + * kbasep_vinstr_collect_and_accumulate - collect hw counters via low level + * dump and accumulate them for known + * clients + * @vinstr_ctx: vinstr context + * @timestamp: pointer where collection timestamp will be recorded + * + * Return: zero on success + */ +static int kbasep_vinstr_collect_and_accumulate( + struct kbase_vinstr_context *vinstr_ctx, u64 *timestamp) +{ + unsigned long flags; + int rcode; + +#ifdef CONFIG_MALI_NO_MALI + /* The dummy model needs the CPU mapping. */ + gpu_model_set_dummy_prfcnt_base_cpu(vinstr_ctx->cpu_va); +#endif + + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + if (VINSTR_IDLE != vinstr_ctx->state) { + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + return -EAGAIN; + } else { + vinstr_ctx->state = VINSTR_DUMPING; + } + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + + /* Request HW counters dump. + * Disable preemption to make dump timestamp more accurate. */ + preempt_disable(); + *timestamp = kbasep_vinstr_get_timestamp(); + rcode = kbase_instr_hwcnt_request_dump(vinstr_ctx->kctx); + preempt_enable(); + + if (!rcode) + rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx); + WARN_ON(rcode); + + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + switch (vinstr_ctx->state) + { + case VINSTR_SUSPENDING: + schedule_work(&vinstr_ctx->suspend_work); + break; + case VINSTR_DUMPING: + vinstr_ctx->state = VINSTR_IDLE; + wake_up_all(&vinstr_ctx->suspend_waitq); + break; + default: + break; + } + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + + /* Accumulate values of collected counters. */ + if (!rcode) + accum_clients(vinstr_ctx); + + return rcode; +} + +/** + * kbasep_vinstr_fill_dump_buffer - copy accumulated counters to empty kernel + * buffer + * @cli: requesting client + * @timestamp: timestamp when counters were collected + * @event_id: id of event that caused triggered counters collection + * + * Return: zero on success + */ +static int kbasep_vinstr_fill_dump_buffer( + struct kbase_vinstr_client *cli, u64 timestamp, + enum base_hwcnt_reader_event event_id) +{ + unsigned int write_idx = atomic_read(&cli->write_idx); + unsigned int read_idx = atomic_read(&cli->read_idx); + + struct kbase_hwcnt_reader_metadata *meta; + void *buffer; + + /* Check if there is a place to copy HWC block into. */ + if (write_idx - read_idx == cli->buffer_count) + return -1; + write_idx %= cli->buffer_count; + + /* Fill in dump buffer and its metadata. 
*/ + buffer = &cli->dump_buffers[write_idx * cli->dump_size]; + meta = &cli->dump_buffers_meta[write_idx]; + meta->timestamp = timestamp; + meta->event_id = event_id; + meta->buffer_idx = write_idx; + memcpy(buffer, cli->accum_buffer, cli->dump_size); + return 0; +} + +/** + * kbasep_vinstr_fill_dump_buffer_legacy - copy accumulated counters to buffer + * allocated in userspace + * @cli: requesting client + * + * Return: zero on success + * + * This is part of legacy ioctl interface. + */ +static int kbasep_vinstr_fill_dump_buffer_legacy( + struct kbase_vinstr_client *cli) +{ + void __user *buffer = cli->legacy_buffer; + int rcode; + + /* Copy data to user buffer. */ + rcode = copy_to_user(buffer, cli->accum_buffer, cli->dump_size); + if (rcode) { + pr_warn("error while copying buffer to user\n"); + return -EFAULT; + } + return 0; +} + +/** + * kbasep_vinstr_fill_dump_buffer_kernel - copy accumulated counters to buffer + * allocated in kernel space + * @cli: requesting client + * + * Return: zero on success + * + * This is part of the kernel client interface. + */ +static int kbasep_vinstr_fill_dump_buffer_kernel( + struct kbase_vinstr_client *cli) +{ + memcpy(cli->kernel_buffer, cli->accum_buffer, cli->dump_size); + + return 0; +} + +/** + * kbasep_vinstr_reprogram - reprogram hwcnt set collected by inst + * @vinstr_ctx: vinstr context + */ +static void kbasep_vinstr_reprogram( + struct kbase_vinstr_context *vinstr_ctx) +{ + unsigned long flags; + bool suspended = false; + + /* Don't enable hardware counters if vinstr is suspended. */ + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + if (VINSTR_IDLE != vinstr_ctx->state) + suspended = true; + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + if (suspended) + return; + + /* Change to suspended state is done while holding vinstr context + * lock. Below code will then no re-enable the instrumentation. */ + + if (vinstr_ctx->reprogram) { + struct kbase_vinstr_client *iter; + + if (!reprogram_hwcnt(vinstr_ctx)) { + vinstr_ctx->reprogram = false; + list_for_each_entry( + iter, + &vinstr_ctx->idle_clients, + list) + iter->pending = false; + list_for_each_entry( + iter, + &vinstr_ctx->waiting_clients, + list) + iter->pending = false; + } + } +} + +/** + * kbasep_vinstr_update_client - copy accumulated counters to user readable + * buffer and notify the user + * @cli: requesting client + * @timestamp: timestamp when counters were collected + * @event_id: id of event that caused triggered counters collection + * + * Return: zero on success + */ +static int kbasep_vinstr_update_client( + struct kbase_vinstr_client *cli, u64 timestamp, + enum base_hwcnt_reader_event event_id) +{ + int rcode = 0; + + /* Copy collected counters to user readable buffer. */ + if (cli->buffer_count) + rcode = kbasep_vinstr_fill_dump_buffer( + cli, timestamp, event_id); + else if (cli->kernel_buffer) + rcode = kbasep_vinstr_fill_dump_buffer_kernel(cli); + else + rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli); + + if (rcode) + goto exit; + + + /* Notify client. Make sure all changes to memory are visible. */ + wmb(); + atomic_inc(&cli->write_idx); + wake_up_interruptible(&cli->waitq); + + /* Prepare for next request. */ + memset(cli->accum_buffer, 0, cli->dump_size); + +exit: + return rcode; +} + +/** + * kbasep_vinstr_wake_up_callback - vinstr wake up timer wake up function + * + * @hrtimer: high resolution timer + * + * Return: High resolution timer restart enum. 
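On the dump-buffer handover implemented above and consumed further down: kbasep_vinstr_update_client() fills a buffer and its metadata, issues wmb(), then increments write_idx; the hwcnt reader's get-buffer ioctl later advances meta_idx when userspace takes the metadata, and the put-buffer ioctl advances read_idx when the buffer is returned. All three are free-running unsigned counters, so the ring-full test in kbasep_vinstr_fill_dump_buffer() stays correct across wraparound. A small illustration with buffer_count == 4 (values picked only for the example):

        unsigned int write_idx = 0xFFFFFFFDu;  /* dumps produced so far */
        unsigned int read_idx  = 0xFFFFFFF9u;  /* userspace still holds 4 */

        /* write_idx - read_idx == 4 == buffer_count, so the ring is full;
         * the unsigned subtraction keeps giving the right distance even
         * after write_idx wraps past zero. */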
+ */
+static enum hrtimer_restart kbasep_vinstr_wake_up_callback(
+		struct hrtimer *hrtimer)
+{
+	struct kbasep_vinstr_wake_up_timer *timer =
+		container_of(
+			hrtimer,
+			struct kbasep_vinstr_wake_up_timer,
+			hrtimer);
+
+	KBASE_DEBUG_ASSERT(timer);
+
+	atomic_set(&timer->vinstr_ctx->request_pending, 1);
+	wake_up_all(&timer->vinstr_ctx->waitq);
+
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * kbasep_vinstr_service_task - HWC dumping service thread
+ *
+ * @data: Pointer to vinstr context structure.
+ *
+ * Return: 0 on success; -ENOMEM if timer allocation fails
+ */
+static int kbasep_vinstr_service_task(void *data)
+{
+	struct kbase_vinstr_context *vinstr_ctx = data;
+	struct kbasep_vinstr_wake_up_timer *timer;
+
+	KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+	timer = kmalloc(sizeof(*timer), GFP_KERNEL);
+
+	if (!timer) {
+		dev_warn(vinstr_ctx->kbdev->dev, "Timer allocation failed!\n");
+		return -ENOMEM;
+	}
+
+	hrtimer_init(&timer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	timer->hrtimer.function = kbasep_vinstr_wake_up_callback;
+	timer->vinstr_ctx = vinstr_ctx;
+
+	while (!kthread_should_stop()) {
+		struct kbase_vinstr_client *cli = NULL;
+		struct kbase_vinstr_client *tmp;
+		int rcode;
+
+		u64 timestamp = kbasep_vinstr_get_timestamp();
+		u64 dump_time = 0;
+		struct list_head expired_requests;
+
+		/* Hold lock while performing operations on lists of clients. */
+		mutex_lock(&vinstr_ctx->lock);
+
+		/* Closing thread must not interact with client requests. */
+		if (current == vinstr_ctx->thread) {
+			atomic_set(&vinstr_ctx->request_pending, 0);
+
+			if (!list_empty(&vinstr_ctx->waiting_clients)) {
+				cli = list_first_entry(
+						&vinstr_ctx->waiting_clients,
+						struct kbase_vinstr_client,
+						list);
+				dump_time = cli->dump_time;
+			}
+		}
+
+		if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) {
+			mutex_unlock(&vinstr_ctx->lock);
+
+			/* Sleep until next dumping event or service request. */
+			if (cli) {
+				u64 diff = dump_time - timestamp;
+
+				hrtimer_start(
+						&timer->hrtimer,
+						ns_to_ktime(diff),
+						HRTIMER_MODE_REL);
+			}
+			wait_event(
+					vinstr_ctx->waitq,
+					atomic_read(
+						&vinstr_ctx->request_pending) ||
+					kthread_should_stop());
+			hrtimer_cancel(&timer->hrtimer);
+			continue;
+		}
+
+		rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx,
+				&timestamp);
+
+		INIT_LIST_HEAD(&expired_requests);
+
+		/* Find all expired requests. */
+		list_for_each_entry_safe(
+				cli,
+				tmp,
+				&vinstr_ctx->waiting_clients,
+				list) {
+			s64 tdiff =
+				(s64)(timestamp + DUMPING_RESOLUTION) -
+				(s64)cli->dump_time;
+			if (tdiff >= 0ll) {
+				list_del(&cli->list);
+				list_add(&cli->list, &expired_requests);
+			} else {
+				break;
+			}
+		}
+
+		/* Fill data for each request found. */
+		list_for_each_entry_safe(cli, tmp, &expired_requests, list) {
+			/* Ensure that legacy buffer will not be used from
+			 * this kthread context. */
+			BUG_ON(0 == cli->buffer_count);
+			/* Expect only periodically sampled clients. */
+			BUG_ON(0 == cli->dump_interval);
+
+			if (!rcode)
+				kbasep_vinstr_update_client(
+						cli,
+						timestamp,
+						BASE_HWCNT_READER_EVENT_PERIODIC);
+
+			/* Set new dumping time. Drop missed probing times. */
+			do {
+				cli->dump_time += cli->dump_interval;
+			} while (cli->dump_time < timestamp);
+
+			list_del(&cli->list);
+			kbasep_vinstr_add_dump_request(
+					cli,
+					&vinstr_ctx->waiting_clients);
+		}
+
+		/* Reprogram counters set if required. 
*/ + kbasep_vinstr_reprogram(vinstr_ctx); + + mutex_unlock(&vinstr_ctx->lock); + } + + kfree(timer); + + return 0; +} + +/*****************************************************************************/ + +/** + * kbasep_vinstr_hwcnt_reader_buffer_ready - check if client has ready buffers + * @cli: pointer to vinstr client structure + * + * Return: non-zero if client has at least one dumping buffer filled that was + * not notified to user yet + */ +static int kbasep_vinstr_hwcnt_reader_buffer_ready( + struct kbase_vinstr_client *cli) +{ + KBASE_DEBUG_ASSERT(cli); + return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx); +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @buffer: pointer to userspace buffer + * @size: size of buffer + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer( + struct kbase_vinstr_client *cli, void __user *buffer, + size_t size) +{ + unsigned int meta_idx = atomic_read(&cli->meta_idx); + unsigned int idx = meta_idx % cli->buffer_count; + + struct kbase_hwcnt_reader_metadata *meta = &cli->dump_buffers_meta[idx]; + + /* Metadata sanity check. */ + KBASE_DEBUG_ASSERT(idx == meta->buffer_idx); + + if (sizeof(struct kbase_hwcnt_reader_metadata) != size) + return -EINVAL; + + /* Check if there is any buffer available. */ + if (atomic_read(&cli->write_idx) == meta_idx) + return -EAGAIN; + + /* Check if previously taken buffer was put back. */ + if (atomic_read(&cli->read_idx) != meta_idx) + return -EBUSY; + + /* Copy next available buffer's metadata to user. */ + if (copy_to_user(buffer, meta, size)) + return -EFAULT; + + atomic_inc(&cli->meta_idx); + + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @buffer: pointer to userspace buffer + * @size: size of buffer + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer( + struct kbase_vinstr_client *cli, void __user *buffer, + size_t size) +{ + unsigned int read_idx = atomic_read(&cli->read_idx); + unsigned int idx = read_idx % cli->buffer_count; + + struct kbase_hwcnt_reader_metadata meta; + + if (sizeof(struct kbase_hwcnt_reader_metadata) != size) + return -EINVAL; + + /* Check if any buffer was taken. */ + if (atomic_read(&cli->meta_idx) == read_idx) + return -EPERM; + + /* Check if correct buffer is put back. */ + if (copy_from_user(&meta, buffer, size)) + return -EFAULT; + if (idx != meta.buffer_idx) + return -EINVAL; + + atomic_inc(&cli->read_idx); + + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_set_interval - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @interval: periodic dumping interval (disable periodic dumping if zero) + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval( + struct kbase_vinstr_client *cli, u32 interval) +{ + struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + mutex_lock(&vinstr_ctx->lock); + + list_del(&cli->list); + + cli->dump_interval = interval; + + /* If interval is non-zero, enable periodic dumping for this client. 
*/ + if (cli->dump_interval) { + if (DUMPING_RESOLUTION > cli->dump_interval) + cli->dump_interval = DUMPING_RESOLUTION; + cli->dump_time = + kbasep_vinstr_get_timestamp() + cli->dump_interval; + + kbasep_vinstr_add_dump_request( + cli, &vinstr_ctx->waiting_clients); + + atomic_set(&vinstr_ctx->request_pending, 1); + wake_up_all(&vinstr_ctx->waitq); + } else { + list_add(&cli->list, &vinstr_ctx->idle_clients); + } + + mutex_unlock(&vinstr_ctx->lock); + + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_event_mask - return event mask for event id + * @event_id: id of event + * Return: event_mask or zero if event is not supported or maskable + */ +static u32 kbasep_vinstr_hwcnt_reader_event_mask( + enum base_hwcnt_reader_event event_id) +{ + u32 event_mask = 0; + + switch (event_id) { + case BASE_HWCNT_READER_EVENT_PREJOB: + case BASE_HWCNT_READER_EVENT_POSTJOB: + /* These event are maskable. */ + event_mask = (1 << event_id); + break; + + case BASE_HWCNT_READER_EVENT_MANUAL: + case BASE_HWCNT_READER_EVENT_PERIODIC: + /* These event are non-maskable. */ + default: + /* These event are not supported. */ + break; + } + + return event_mask; +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_enable_event - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @event_id: id of event to enable + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event( + struct kbase_vinstr_client *cli, + enum base_hwcnt_reader_event event_id) +{ + struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; + u32 event_mask; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id); + if (!event_mask) + return -EINVAL; + + mutex_lock(&vinstr_ctx->lock); + cli->event_mask |= event_mask; + mutex_unlock(&vinstr_ctx->lock); + + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_disable_event - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @event_id: id of event to disable + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event( + struct kbase_vinstr_client *cli, + enum base_hwcnt_reader_event event_id) +{ + struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; + u32 event_mask; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + event_mask = kbasep_vinstr_hwcnt_reader_event_mask(event_id); + if (!event_mask) + return -EINVAL; + + mutex_lock(&vinstr_ctx->lock); + cli->event_mask &= ~event_mask; + mutex_unlock(&vinstr_ctx->lock); + + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver - hwcnt reader's ioctl command + * @cli: pointer to vinstr client structure + * @hwver: pointer to user buffer where hw version will be stored + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver( + struct kbase_vinstr_client *cli, u32 __user *hwver) +{ +#ifndef CONFIG_MALI_NO_MALI + struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx; +#endif + + u32 ver = 5; + +#ifndef CONFIG_MALI_NO_MALI + KBASE_DEBUG_ASSERT(vinstr_ctx); + if (kbase_hw_has_feature(vinstr_ctx->kbdev, BASE_HW_FEATURE_V4)) + ver = 4; +#endif + + return put_user(ver, hwver); +} + +/** + * kbasep_vinstr_hwcnt_reader_ioctl - hwcnt reader's ioctl + * @filp: pointer to file structure + * @cmd: user command + * @arg: command's argument + * + * Return: zero on success + */ +static long kbasep_vinstr_hwcnt_reader_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + long rcode = 0; + struct kbase_vinstr_client *cli; + + 
KBASE_DEBUG_ASSERT(filp); + + cli = filp->private_data; + KBASE_DEBUG_ASSERT(cli); + + if (unlikely(KBASE_HWCNT_READER != _IOC_TYPE(cmd))) + return -EINVAL; + + switch (cmd) { + case KBASE_HWCNT_READER_GET_API_VERSION: + rcode = put_user(HWCNT_READER_API, (u32 __user *)arg); + break; + case KBASE_HWCNT_READER_GET_HWVER: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver( + cli, (u32 __user *)arg); + break; + case KBASE_HWCNT_READER_GET_BUFFER_SIZE: + KBASE_DEBUG_ASSERT(cli->vinstr_ctx); + rcode = put_user( + (u32)cli->vinstr_ctx->dump_size, + (u32 __user *)arg); + break; + case KBASE_HWCNT_READER_DUMP: + rcode = kbase_vinstr_hwc_dump( + cli, BASE_HWCNT_READER_EVENT_MANUAL); + break; + case KBASE_HWCNT_READER_CLEAR: + rcode = kbase_vinstr_hwc_clear(cli); + break; + case KBASE_HWCNT_READER_GET_BUFFER: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer( + cli, (void __user *)arg, _IOC_SIZE(cmd)); + break; + case KBASE_HWCNT_READER_PUT_BUFFER: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer( + cli, (void __user *)arg, _IOC_SIZE(cmd)); + break; + case KBASE_HWCNT_READER_SET_INTERVAL: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval( + cli, (u32)arg); + break; + case KBASE_HWCNT_READER_ENABLE_EVENT: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event( + cli, (enum base_hwcnt_reader_event)arg); + break; + case KBASE_HWCNT_READER_DISABLE_EVENT: + rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event( + cli, (enum base_hwcnt_reader_event)arg); + break; + default: + rcode = -EINVAL; + break; + } + + return rcode; +} + +/** + * kbasep_vinstr_hwcnt_reader_poll - hwcnt reader's poll + * @filp: pointer to file structure + * @wait: pointer to poll table + * Return: POLLIN if data can be read without blocking, otherwise zero + */ +static unsigned int kbasep_vinstr_hwcnt_reader_poll(struct file *filp, + poll_table *wait) +{ + struct kbase_vinstr_client *cli; + + KBASE_DEBUG_ASSERT(filp); + KBASE_DEBUG_ASSERT(wait); + + cli = filp->private_data; + KBASE_DEBUG_ASSERT(cli); + + poll_wait(filp, &cli->waitq, wait); + if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli)) + return POLLIN; + return 0; +} + +/** + * kbasep_vinstr_hwcnt_reader_mmap - hwcnt reader's mmap + * @filp: pointer to file structure + * @vma: pointer to vma structure + * Return: zero on success + */ +static int kbasep_vinstr_hwcnt_reader_mmap(struct file *filp, + struct vm_area_struct *vma) +{ + struct kbase_vinstr_client *cli; + unsigned long size, addr, pfn, offset; + unsigned long vm_size = vma->vm_end - vma->vm_start; + + KBASE_DEBUG_ASSERT(filp); + KBASE_DEBUG_ASSERT(vma); + + cli = filp->private_data; + KBASE_DEBUG_ASSERT(cli); + + size = cli->buffer_count * cli->dump_size; + + if (vma->vm_pgoff > (size >> PAGE_SHIFT)) + return -EINVAL; + + offset = vma->vm_pgoff << PAGE_SHIFT; + if (vm_size > size - offset) + return -EINVAL; + + addr = __pa((unsigned long)cli->dump_buffers + offset); + pfn = addr >> PAGE_SHIFT; + + return remap_pfn_range( + vma, + vma->vm_start, + pfn, + vm_size, + vma->vm_page_prot); +} + +/** + * kbasep_vinstr_hwcnt_reader_release - hwcnt reader's release + * @inode: pointer to inode structure + * @filp: pointer to file structure + * Return always return zero + */ +static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode, + struct file *filp) +{ + struct kbase_vinstr_client *cli; + + KBASE_DEBUG_ASSERT(inode); + KBASE_DEBUG_ASSERT(filp); + + cli = filp->private_data; + KBASE_DEBUG_ASSERT(cli); + + kbase_vinstr_detach_client(cli); + return 0; +} + 
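For context, the file operations above (ioctl, poll, mmap, release) are what a user-space profiler sees once kbase_vinstr_hwcnt_reader_setup() has handed it a reader fd. The sketch below is illustrative only, not part of the patch: the KBASE_HWCNT_READER_* ioctls and struct kbase_hwcnt_reader_metadata are exactly the ones this interface defines, while process_sample(), the buffer geometry arguments (which mirror the buffer_count and per-dump size chosen at setup time) and the minimal error handling are assumptions.

#include <poll.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* KBASE_HWCNT_READER_* and struct kbase_hwcnt_reader_metadata are assumed to
 * come from the driver's hwcnt reader UAPI header on the include path. */

/* Hypothetical, user-supplied consumer of one counter dump. */
extern void process_sample(const void *dump, size_t size, uint64_t timestamp_ns);

static int read_hwcnt_samples(int fd, unsigned int buffer_count,
			      size_t buffer_size, uint32_t interval_ns,
			      unsigned int samples)
{
	struct kbase_hwcnt_reader_metadata meta;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	uint8_t *bufs = mmap(NULL, buffer_count * buffer_size, PROT_READ,
			     MAP_SHARED, fd, 0);

	if (bufs == MAP_FAILED)
		return -1;

	/* A non-zero interval moves this client onto the periodic dump list. */
	ioctl(fd, KBASE_HWCNT_READER_SET_INTERVAL, interval_ns);

	while (samples--) {
		if (poll(&pfd, 1, -1) <= 0)
			break;
		/* Fails with EAGAIN if no buffer is ready, or EBUSY if the
		 * previously taken buffer has not been put back yet. */
		if (ioctl(fd, KBASE_HWCNT_READER_GET_BUFFER, &meta))
			continue;
		process_sample(bufs + meta.buffer_idx * buffer_size,
			       buffer_size, meta.timestamp);
		ioctl(fd, KBASE_HWCNT_READER_PUT_BUFFER, &meta);
	}

	ioctl(fd, KBASE_HWCNT_READER_SET_INTERVAL, 0); /* stop periodic dumps */
	munmap(bufs, buffer_count * buffer_size);
	return 0;
}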
+/*****************************************************************************/
+
+/**
+ * kbasep_vinstr_kick_scheduler - trigger scheduler cycle
+ * @kbdev: pointer to kbase device structure
+ */
+static void kbasep_vinstr_kick_scheduler(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	unsigned long flags;
+
+	down(&js_devdata->schedule_sem);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_backend_slot_update(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	up(&js_devdata->schedule_sem);
+}
+
+/**
+ * kbasep_vinstr_suspend_worker - worker suspending vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_suspend_worker(struct work_struct *data)
+{
+	struct kbase_vinstr_context *vinstr_ctx;
+	unsigned long flags;
+
+	vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+			suspend_work);
+
+	mutex_lock(&vinstr_ctx->lock);
+
+	if (vinstr_ctx->kctx)
+		disable_hwcnt(vinstr_ctx);
+
+	spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+	vinstr_ctx->state = VINSTR_SUSPENDED;
+	wake_up_all(&vinstr_ctx->suspend_waitq);
+	spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+	mutex_unlock(&vinstr_ctx->lock);
+
+	/* Kick GPU scheduler to allow entering protected mode.
+	 * This must happen after vinstr was suspended. */
+	kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/**
+ * kbasep_vinstr_resume_worker - worker resuming vinstr module
+ * @data: pointer to work structure
+ */
+static void kbasep_vinstr_resume_worker(struct work_struct *data)
+{
+	struct kbase_vinstr_context *vinstr_ctx;
+	unsigned long flags;
+
+	vinstr_ctx = container_of(data, struct kbase_vinstr_context,
+			resume_work);
+
+	mutex_lock(&vinstr_ctx->lock);
+
+	if (vinstr_ctx->kctx)
+		enable_hwcnt(vinstr_ctx);
+
+	spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+	vinstr_ctx->state = VINSTR_IDLE;
+	wake_up_all(&vinstr_ctx->suspend_waitq);
+	spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+	mutex_unlock(&vinstr_ctx->lock);
+
+	/* Kick GPU scheduler to allow entering protected mode.
+	 * Note that the scheduler state machine might have requested re-entry
+	 * to protected mode before vinstr was resumed.
+	 * This must happen after vinstr was released. */
+	kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+}
+
+/*****************************************************************************/
+
+struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
+{
+	struct kbase_vinstr_context *vinstr_ctx;
+
+	vinstr_ctx = kzalloc(sizeof(*vinstr_ctx), GFP_KERNEL);
+	if (!vinstr_ctx)
+		return NULL;
+
+	INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
+	INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
+	mutex_init(&vinstr_ctx->lock);
+	spin_lock_init(&vinstr_ctx->state_lock);
+	vinstr_ctx->kbdev = kbdev;
+	vinstr_ctx->thread = NULL;
+	vinstr_ctx->state = VINSTR_IDLE;
+	vinstr_ctx->suspend_cnt = 0;
+	INIT_WORK(&vinstr_ctx->suspend_work, kbasep_vinstr_suspend_worker);
+	INIT_WORK(&vinstr_ctx->resume_work, kbasep_vinstr_resume_worker);
+	init_waitqueue_head(&vinstr_ctx->suspend_waitq);
+
+	atomic_set(&vinstr_ctx->request_pending, 0);
+	init_waitqueue_head(&vinstr_ctx->waitq);
+
+	return vinstr_ctx;
+}
+
+void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
+{
+	struct kbase_vinstr_client *cli;
+
+	/* Stop service thread first. */
+	if (vinstr_ctx->thread)
+		kthread_stop(vinstr_ctx->thread);
+
+	/* Wait for workers. 
*/
+	flush_work(&vinstr_ctx->suspend_work);
+	flush_work(&vinstr_ctx->resume_work);
+
+	while (1) {
+		struct list_head *list = &vinstr_ctx->idle_clients;
+
+		if (list_empty(list)) {
+			list = &vinstr_ctx->waiting_clients;
+			if (list_empty(list))
+				break;
+		}
+
+		cli = list_first_entry(list, struct kbase_vinstr_client, list);
+		list_del(&cli->list);
+		kfree(cli->accum_buffer);
+		kfree(cli);
+		vinstr_ctx->nclients--;
+	}
+	KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients);
+	if (vinstr_ctx->kctx)
+		kbasep_vinstr_destroy_kctx(vinstr_ctx);
+	kfree(vinstr_ctx);
+}
+
+int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
+		struct kbase_uk_hwcnt_reader_setup *setup)
+{
+	struct kbase_vinstr_client *cli;
+	u32 bitmap[4];
+
+	KBASE_DEBUG_ASSERT(vinstr_ctx);
+	KBASE_DEBUG_ASSERT(setup);
+	KBASE_DEBUG_ASSERT(setup->buffer_count);
+
+	bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+	bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+	bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+	bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+	cli = kbasep_vinstr_attach_client(
+			vinstr_ctx,
+			setup->buffer_count,
+			bitmap,
+			&setup->fd,
+			NULL);
+
+	if (!cli)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int kbase_vinstr_legacy_hwc_setup(
+		struct kbase_vinstr_context *vinstr_ctx,
+		struct kbase_vinstr_client **cli,
+		struct kbase_uk_hwcnt_setup *setup)
+{
+	KBASE_DEBUG_ASSERT(vinstr_ctx);
+	KBASE_DEBUG_ASSERT(setup);
+	KBASE_DEBUG_ASSERT(cli);
+
+	if (setup->dump_buffer) {
+		u32 bitmap[4];
+
+		bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+		bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+		bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+		bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+		if (*cli)
+			return -EBUSY;
+
+		*cli = kbasep_vinstr_attach_client(
+				vinstr_ctx,
+				0,
+				bitmap,
+				(void *)(long)setup->dump_buffer,
+				NULL);
+
+		if (!(*cli))
+			return -ENOMEM;
+	} else {
+		if (!*cli)
+			return -EINVAL;
+
+		kbase_vinstr_detach_client(*cli);
+		*cli = NULL;
+	}
+
+	return 0;
+}
+
+struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
+		struct kbase_vinstr_context *vinstr_ctx,
+		struct kbase_uk_hwcnt_reader_setup *setup,
+		void *kernel_buffer)
+{
+	u32 bitmap[4];
+
+	if (!vinstr_ctx || !setup || !kernel_buffer)
+		return NULL;
+
+	bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
+	bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
+	bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
+	bitmap[JM_HWCNT_BM] = setup->jm_bm;
+
+	return kbasep_vinstr_attach_client(
+			vinstr_ctx,
+			0,
+			bitmap,
+			NULL,
+			kernel_buffer);
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwcnt_kernel_setup);
+
+int kbase_vinstr_hwc_dump(struct kbase_vinstr_client *cli,
+		enum base_hwcnt_reader_event event_id)
+{
+	int rcode = 0;
+	struct kbase_vinstr_context *vinstr_ctx;
+	u64 timestamp;
+	u32 event_mask;
+
+	if (!cli)
+		return -EINVAL;
+
+	vinstr_ctx = cli->vinstr_ctx;
+	KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+	KBASE_DEBUG_ASSERT(event_id < BASE_HWCNT_READER_EVENT_COUNT);
+	event_mask = 1 << event_id;
+
+	mutex_lock(&vinstr_ctx->lock);
+
+	if (event_mask & cli->event_mask) {
+		rcode = kbasep_vinstr_collect_and_accumulate(
+				vinstr_ctx,
+				&timestamp);
+		if (rcode)
+			goto exit;
+
+		rcode = kbasep_vinstr_update_client(cli, timestamp, event_id);
+		if (rcode)
+			goto exit;
+
+		kbasep_vinstr_reprogram(vinstr_ctx);
+	}
+
+exit:
+	mutex_unlock(&vinstr_ctx->lock);
+
+	return rcode;
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_hwc_dump);
+
+int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli)
+{
+	struct kbase_vinstr_context *vinstr_ctx;
+	int rcode;
+	u64 unused;
+
+	if (!cli)
+		return -EINVAL;
+
+	vinstr_ctx = 
cli->vinstr_ctx; + KBASE_DEBUG_ASSERT(vinstr_ctx); + + mutex_lock(&vinstr_ctx->lock); + + rcode = kbasep_vinstr_collect_and_accumulate(vinstr_ctx, &unused); + if (rcode) + goto exit; + rcode = kbase_instr_hwcnt_clear(vinstr_ctx->kctx); + if (rcode) + goto exit; + memset(cli->accum_buffer, 0, cli->dump_size); + + kbasep_vinstr_reprogram(vinstr_ctx); + +exit: + mutex_unlock(&vinstr_ctx->lock); + + return rcode; +} + +int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx) +{ + unsigned long flags; + int ret = -EAGAIN; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + switch (vinstr_ctx->state) { + case VINSTR_SUSPENDED: + vinstr_ctx->suspend_cnt++; + /* overflow shall not happen */ + BUG_ON(0 == vinstr_ctx->suspend_cnt); + ret = 0; + break; + + case VINSTR_IDLE: + if (vinstr_ctx->clients_present) { + vinstr_ctx->state = VINSTR_SUSPENDING; + schedule_work(&vinstr_ctx->suspend_work); + } else { + vinstr_ctx->state = VINSTR_SUSPENDED; + + vinstr_ctx->suspend_cnt++; + /* overflow shall not happen */ + WARN_ON(0 == vinstr_ctx->suspend_cnt); + ret = 0; + } + break; + + case VINSTR_DUMPING: + vinstr_ctx->state = VINSTR_SUSPENDING; + break; + + case VINSTR_SUSPENDING: + /* fall through */ + case VINSTR_RESUMING: + break; + + default: + BUG(); + break; + } + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); + + return ret; +} + +void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx) +{ + wait_event(vinstr_ctx->suspend_waitq, + (0 == kbase_vinstr_try_suspend(vinstr_ctx))); +} + +void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx) +{ + unsigned long flags; + + KBASE_DEBUG_ASSERT(vinstr_ctx); + + spin_lock_irqsave(&vinstr_ctx->state_lock, flags); + BUG_ON(VINSTR_SUSPENDING == vinstr_ctx->state); + if (VINSTR_SUSPENDED == vinstr_ctx->state) { + BUG_ON(0 == vinstr_ctx->suspend_cnt); + vinstr_ctx->suspend_cnt--; + if (0 == vinstr_ctx->suspend_cnt) { + if (vinstr_ctx->clients_present) { + vinstr_ctx->state = VINSTR_RESUMING; + schedule_work(&vinstr_ctx->resume_work); + } else { + vinstr_ctx->state = VINSTR_IDLE; + } + } + } + spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags); +} diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.h b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h new file mode 100644 index 00000000000000..4dbf7eedce119c --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h @@ -0,0 +1,177 @@ +/* + * + * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#ifndef _KBASE_VINSTR_H_ +#define _KBASE_VINSTR_H_ + +#include + +/*****************************************************************************/ + +struct kbase_vinstr_context; +struct kbase_vinstr_client; + +struct kbase_uk_hwcnt_setup { + /* IN */ + u64 dump_buffer; + u32 jm_bm; + u32 shader_bm; + u32 tiler_bm; + u32 unused_1; /* keep for backwards compatibility */ + u32 mmu_l2_bm; + u32 padding; + /* OUT */ +}; + +struct kbase_uk_hwcnt_reader_setup { + /* IN */ + u32 buffer_count; + u32 jm_bm; + u32 shader_bm; + u32 tiler_bm; + u32 mmu_l2_bm; + + /* OUT */ + s32 fd; +}; +/*****************************************************************************/ + +/** + * kbase_vinstr_init() - initialize the vinstr core + * @kbdev: kbase device + * + * Return: pointer to the vinstr context on success or NULL on failure + */ +struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev); + +/** + * kbase_vinstr_term() - terminate the vinstr core + * @vinstr_ctx: vinstr context + */ +void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx); + +/** + * kbase_vinstr_hwcnt_reader_setup - configure hw counters reader + * @vinstr_ctx: vinstr context + * @setup: reader's configuration + * + * Return: zero on success + */ +int kbase_vinstr_hwcnt_reader_setup( + struct kbase_vinstr_context *vinstr_ctx, + struct kbase_uk_hwcnt_reader_setup *setup); + +/** + * kbase_vinstr_legacy_hwc_setup - configure hw counters for dumping + * @vinstr_ctx: vinstr context + * @cli: pointer where to store pointer to new vinstr client structure + * @setup: hwc configuration + * + * Return: zero on success + */ +int kbase_vinstr_legacy_hwc_setup( + struct kbase_vinstr_context *vinstr_ctx, + struct kbase_vinstr_client **cli, + struct kbase_uk_hwcnt_setup *setup); + +/** + * kbase_vinstr_hwcnt_kernel_setup - configure hw counters for kernel side + * client + * @vinstr_ctx: vinstr context + * @setup: reader's configuration + * @kernel_buffer: pointer to dump buffer + * + * setup->buffer_count and setup->fd are not used for kernel side clients. + * + * Return: pointer to client structure, or NULL on failure + */ +struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup( + struct kbase_vinstr_context *vinstr_ctx, + struct kbase_uk_hwcnt_reader_setup *setup, + void *kernel_buffer); + +/** + * kbase_vinstr_hwc_dump - issue counter dump for vinstr client + * @cli: pointer to vinstr client + * @event_id: id of event that triggered hwcnt dump + * + * Return: zero on success + */ +int kbase_vinstr_hwc_dump( + struct kbase_vinstr_client *cli, + enum base_hwcnt_reader_event event_id); + +/** + * kbase_vinstr_hwc_clear - performs a reset of the hardware counters for + * a given kbase context + * @cli: pointer to vinstr client + * + * Return: zero on success + */ +int kbase_vinstr_hwc_clear(struct kbase_vinstr_client *cli); + +/** + * kbase_vinstr_try_suspend - try suspending operation of a given vinstr context + * @vinstr_ctx: vinstr context + * + * Return: 0 on success, or negative if state change is in progress + * + * Warning: This API call is non-generic. It is meant to be used only by + * job scheduler state machine. + * + * Function initiates vinstr switch to suspended state. Once it was called + * vinstr enters suspending state. If function return non-zero value, it + * indicates that state switch is not complete and function must be called + * again. On state switch vinstr will trigger job scheduler state machine + * cycle. 
+ */ +int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx); + +/** + * kbase_vinstr_suspend - suspends operation of a given vinstr context + * @vinstr_ctx: vinstr context + * + * Function initiates vinstr switch to suspended state. Then it blocks until + * operation is completed. + */ +void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx); + +/** + * kbase_vinstr_resume - resumes operation of a given vinstr context + * @vinstr_ctx: vinstr context + * + * Function can be called only if it was preceded by a successful call + * to kbase_vinstr_suspend. + */ +void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx); + +/** + * kbase_vinstr_dump_size - Return required size of dump buffer + * @kbdev: device pointer + * + * Return : buffer size in bytes + */ +size_t kbase_vinstr_dump_size(struct kbase_device *kbdev); + +/** + * kbase_vinstr_detach_client - Detach a client from the vinstr core + * @cli: pointer to vinstr client + */ +void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli); + +#endif /* _KBASE_VINSTR_H_ */ + diff --git a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h new file mode 100644 index 00000000000000..5d6b4021d626f8 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h @@ -0,0 +1,201 @@ +/* + * + * (C) COPYRIGHT 2014 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + +#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MALI_KBASE_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mali + +#include + +DECLARE_EVENT_CLASS(mali_slot_template, + TP_PROTO(int jobslot, unsigned int info_val), + TP_ARGS(jobslot, info_val), + TP_STRUCT__entry( + __field(unsigned int, jobslot) + __field(unsigned int, info_val) + ), + TP_fast_assign( + __entry->jobslot = jobslot; + __entry->info_val = info_val; + ), + TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val) +); + +#define DEFINE_MALI_SLOT_EVENT(name) \ +DEFINE_EVENT(mali_slot_template, mali_##name, \ + TP_PROTO(int jobslot, unsigned int info_val), \ + TP_ARGS(jobslot, info_val)) +DEFINE_MALI_SLOT_EVENT(JM_SUBMIT); +DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE); +DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD); +DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD); +DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP); +DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0); +DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1); +DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP); +DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0); +DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1); +DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP); +DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT); +DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER); +DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER); +DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED); +DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED); +DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT); +DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB); +DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED); +DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED); +DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED); +DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE); +DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB); +DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED); +DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB); +DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ); +#undef DEFINE_MALI_SLOT_EVENT + +DECLARE_EVENT_CLASS(mali_refcount_template, + TP_PROTO(int refcount, unsigned int info_val), + TP_ARGS(refcount, info_val), + TP_STRUCT__entry( + __field(unsigned int, refcount) + __field(unsigned int, info_val) + ), + TP_fast_assign( + __entry->refcount = refcount; + __entry->info_val = info_val; + ), + TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val) +); + +#define DEFINE_MALI_REFCOUNT_EVENT(name) \ +DEFINE_EVENT(mali_refcount_template, mali_##name, \ + TP_PROTO(int refcount, unsigned int info_val), \ + TP_ARGS(refcount, info_val)) +DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK); +DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB); +DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB); +DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX); +DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS); +DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE); +DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE); +#undef DEFINE_MALI_REFCOUNT_EVENT + +DECLARE_EVENT_CLASS(mali_add_template, + TP_PROTO(int gpu_addr, unsigned int info_val), + TP_ARGS(gpu_addr, info_val), + TP_STRUCT__entry( + 
__field(unsigned int, gpu_addr) + __field(unsigned int, info_val) + ), + TP_fast_assign( + __entry->gpu_addr = gpu_addr; + __entry->info_val = info_val; + ), + TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val) +); + +#define DEFINE_MALI_ADD_EVENT(name) \ +DEFINE_EVENT(mali_add_template, mali_##name, \ + TP_PROTO(int gpu_addr, unsigned int info_val), \ + TP_ARGS(gpu_addr, info_val)) +DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY); +DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM); +DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ); +DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR); +DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE); +DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET); +DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET); +DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE); +DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR); +DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES); +DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER); +DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END); +DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER); +DEFINE_MALI_ADD_EVENT(JD_DONE); +DEFINE_MALI_ADD_EVENT(JD_CANCEL); +DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT); +DEFINE_MALI_ADD_EVENT(JM_IRQ); +DEFINE_MALI_ADD_EVENT(JM_IRQ_END); +DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS); +DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE); +DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED); +DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED); +DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE); +DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET); +DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE); +DEFINE_MALI_ADD_EVENT(JS_FAST_START_EVICTS_CTX); +DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL); +DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL); +DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX); +DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX); +DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END); +DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START); +DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB); +DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED); +DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP); +DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP); +DEFINE_MALI_ADD_EVENT(PM_PWRON); +DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER); +DEFINE_MALI_ADD_EVENT(PM_PWRON_L2); +DEFINE_MALI_ADD_EVENT(PM_PWROFF); +DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER); +DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2); +DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED); +DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER); +DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2); +DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED); +DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER); +DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED); +DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED); +DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED); +DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE); +DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE); +DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE); +DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER); +DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE); +DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER); +DEFINE_MALI_ADD_EVENT(PM_GPU_ON); +DEFINE_MALI_ADD_EVENT(PM_GPU_OFF); +DEFINE_MALI_ADD_EVENT(PM_SET_POLICY); +DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT); +DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM); +DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY); +DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS); +#undef DEFINE_MALI_ADD_EVENT + +#endif /* _TRACE_MALI_KBASE_H */ + +#undef TRACE_INCLUDE_PATH +#undef linux +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm/midgard/mali_linux_trace.h b/drivers/gpu/arm/midgard/mali_linux_trace.h
new file mode 100644
index 00000000000000..2be06a5527689c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_linux_trace.h
@@ -0,0 +1,189 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+#include <linux/tracepoint.h>
+
+#define MALI_JOB_SLOTS_EVENT_CHANGED
+
+/**
+ * mali_job_slots_event - called from mali_kbase_core_linux.c
+ * @event_id: ORed together bitfields representing a type of event, made with the GATOR_MAKE_EVENT() macro.
+ */
+TRACE_EVENT(mali_job_slots_event,
+	TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid,
+			unsigned char job_id),
+	TP_ARGS(event_id, tgid, pid, job_id),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned int, tgid)
+		__field(unsigned int, pid)
+		__field(unsigned char, job_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->tgid = tgid;
+		__entry->pid = pid;
+		__entry->job_id = job_id;
+	),
+	TP_printk("event=%u tgid=%u pid=%u job_id=%u",
+			__entry->event_id, __entry->tgid, __entry->pid, __entry->job_id)
+);
+
+/**
+ * mali_pm_status - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64bits bitmask reporting either power status of the cores (1-ON, 0-OFF)
+ */
+TRACE_EVENT(mali_pm_status,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_on - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64bits bitmask reporting the cores to power up
+ */
+TRACE_EVENT(mali_pm_power_on,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_off - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64bits bitmask reporting the cores to power down
+ */
+TRACE_EVENT(mali_pm_power_off,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_page_fault_insert_pages - Called by page_fault_worker()
+ * it reports an MMU page fault resulting in new pages being mapped.
+ * @event_id: MMU address space number.
+ * @value: number of newly allocated pages
+ */
+TRACE_EVENT(mali_page_fault_insert_pages,
+	TP_PROTO(int event_id, unsigned long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+		__field(unsigned long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %d = %lu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_mmu_as_in_use - Called by assign_and_activate_kctx_addr_space()
+ * it reports that a certain MMU address space is in use now.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_in_use,
+	TP_PROTO(int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_mmu_as_released - Called by kbasep_js_runpool_release_ctx_internal()
+ * it reports that a certain MMU address space has been released now.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_released,
+	TP_PROTO(int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_total_alloc_pages_change - Called by kbase_atomic_add_pages()
+ * and by kbase_atomic_sub_pages()
+ * it reports that the total number of allocated pages is changed.
+ * @event_id: number of pages to be added or subtracted (according to the sign).
+ */
+TRACE_EVENT(mali_total_alloc_pages_change,
+	TP_PROTO(long long int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(long long int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%lld", __entry->event_id)
+);
+
+#endif /* _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm/midgard/mali_malisw.h b/drivers/gpu/arm/midgard/mali_malisw.h
new file mode 100644
index 00000000000000..99452933eab480
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_malisw.h
@@ -0,0 +1,131 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Kernel-wide include for common macros and types. 
+ */
+
+#ifndef _MALISW_H_
+#define _MALISW_H_
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+#define SIZE_MAX (~(size_t)0)
+#endif /* LINUX_VERSION_CODE */
+
+/**
+ * MIN - Return the lesser of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * Refer to MAX macro for more details
+ */
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+/**
+ * MAX - Return the greater of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * If called on the same two arguments as MIN it is guaranteed to return
+ * the one that MIN didn't return. This is significant for types where not
+ * all values are comparable e.g. NaNs in floating-point types. But if you want
+ * to retrieve the min and max of two values, consider using a conditional swap
+ * instead.
+ */
+#define MAX(x, y) ((x) < (y) ? (y) : (x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for suppressing unused variable warnings. Where possible
+ * such variables should be removed; this macro is present for cases where we
+ * must support API backwards compatibility.
+ */
+#define CSTD_UNUSED(x) ((void)(x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for use where "no behavior" is desired. This is useful
+ * when compile time macros turn a function-like macro in to a no-op, but
+ * where having no statement is otherwise invalid.
+ */
+#define CSTD_NOP(...) ((void)#__VA_ARGS__)
+
+/**
+ * Function-like macro for converting a pointer in to a u64 for storing into
+ * an external data structure. This is commonly used when pairing a 32-bit
+ * CPU with a 64-bit peripheral, such as a Midgard GPU. C's type promotion
+ * is complex and a straight cast does not work reliably as pointers are
+ * often considered as signed.
+ */
+#define PTR_TO_U64(x) ((uint64_t)((uintptr_t)(x)))
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a single level macro.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR1( MY_MACRO )
+ * > "MY_MACRO"
+ * @endcode
+ */
+#define CSTD_STR1(x) #x
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a macro's value. This should not be used
+ * if the macro is defined in a way which may have no value; the
+ * alternative @c CSTD_STR2N macro should be used instead.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR2( MY_MACRO )
+ * > "32"
+ * @endcode
+ */
+#define CSTD_STR2(x) CSTD_STR1(x)
+
+/**
+ * Specify an assertion value which is evaluated at compile time. Recommended
+ * usage is specification of a @c static @c INLINE function containing all of
+ * the assertions thus:
+ *
+ * @code
+ * static INLINE [module]_compile_time_assertions( void )
+ * {
+ *     COMPILE_TIME_ASSERT( sizeof(uintptr_t) == sizeof(intptr_t) );
+ * }
+ * @endcode
+ *
+ * @note Use @c static not @c STATIC. We never want to turn off this @c static
+ * specification for testing purposes. 
+ */ +#define CSTD_COMPILE_TIME_ASSERT(expr) \ + do { switch (0) { case 0: case (expr):; } } while (false) + +#endif /* _MALISW_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_midg_coherency.h b/drivers/gpu/arm/midgard/mali_midg_coherency.h new file mode 100644 index 00000000000000..a509cbd5f17598 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_midg_coherency.h @@ -0,0 +1,26 @@ +/* + * + * (C) COPYRIGHT 2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _MIDG_COHERENCY_H_ +#define _MIDG_COHERENCY_H_ + +#define COHERENCY_ACE_LITE 0 +#define COHERENCY_ACE 1 +#define COHERENCY_NONE 31 +#define COHERENCY_FEATURE_BIT(x) (1 << (x)) + +#endif /* _MIDG_COHERENCY_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_midg_regmap.h b/drivers/gpu/arm/midgard/mali_midg_regmap.h new file mode 100644 index 00000000000000..bf431e03c90e28 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_midg_regmap.h @@ -0,0 +1,618 @@ +/* + * + * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _MIDGARD_REGMAP_H_ +#define _MIDGARD_REGMAP_H_ + +#include "mali_midg_coherency.h" +#include "mali_kbase_gpu_id.h" + +/* + * Begin Register Offsets + */ + +#define GPU_CONTROL_BASE 0x0000 +#define GPU_CONTROL_REG(r) (GPU_CONTROL_BASE + (r)) +#define GPU_ID 0x000 /* (RO) GPU and revision identifier */ +#define L2_FEATURES 0x004 /* (RO) Level 2 cache features */ +#define SUSPEND_SIZE 0x008 /* (RO) Fixed-function suspend buffer + size */ +#define TILER_FEATURES 0x00C /* (RO) Tiler Features */ +#define MEM_FEATURES 0x010 /* (RO) Memory system features */ +#define MMU_FEATURES 0x014 /* (RO) MMU features */ +#define AS_PRESENT 0x018 /* (RO) Address space slots present */ +#define JS_PRESENT 0x01C /* (RO) Job slots present */ +#define GPU_IRQ_RAWSTAT 0x020 /* (RW) */ +#define GPU_IRQ_CLEAR 0x024 /* (WO) */ +#define GPU_IRQ_MASK 0x028 /* (RW) */ +#define GPU_IRQ_STATUS 0x02C /* (RO) */ + +/* IRQ flags */ +#define GPU_FAULT (1 << 0) /* A GPU Fault has occurred */ +#define MULTIPLE_GPU_FAULTS (1 << 7) /* More than one GPU Fault occurred. */ +#define RESET_COMPLETED (1 << 8) /* Set when a reset has completed. Intended to use with SOFT_RESET + commands which may take time. */ +#define POWER_CHANGED_SINGLE (1 << 9) /* Set when a single core has finished powering up or down. */ +#define POWER_CHANGED_ALL (1 << 10) /* Set when all cores have finished powering up or down + and the power manager is idle. */ + +#define PRFCNT_SAMPLE_COMPLETED (1 << 16) /* Set when a performance count sample has completed. */ +#define CLEAN_CACHES_COMPLETED (1 << 17) /* Set when a cache clean operation has completed. 
*/ + +#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \ + | POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED) + +#define GPU_COMMAND 0x030 /* (WO) */ +#define GPU_STATUS 0x034 /* (RO) */ +#define LATEST_FLUSH 0x038 /* (RO) */ + +#define GROUPS_L2_COHERENT (1 << 0) /* Cores groups are l2 coherent */ +#define GPU_DBGEN (1 << 8) /* DBGEN wire status */ + +#define GPU_FAULTSTATUS 0x03C /* (RO) GPU exception type and fault status */ +#define GPU_FAULTADDRESS_LO 0x040 /* (RO) GPU exception fault address, low word */ +#define GPU_FAULTADDRESS_HI 0x044 /* (RO) GPU exception fault address, high word */ + +#define PWR_KEY 0x050 /* (WO) Power manager key register */ +#define PWR_OVERRIDE0 0x054 /* (RW) Power manager override settings */ +#define PWR_OVERRIDE1 0x058 /* (RW) Power manager override settings */ + +#define PRFCNT_BASE_LO 0x060 /* (RW) Performance counter memory region base address, low word */ +#define PRFCNT_BASE_HI 0x064 /* (RW) Performance counter memory region base address, high word */ +#define PRFCNT_CONFIG 0x068 /* (RW) Performance counter configuration */ +#define PRFCNT_JM_EN 0x06C /* (RW) Performance counter enable flags for Job Manager */ +#define PRFCNT_SHADER_EN 0x070 /* (RW) Performance counter enable flags for shader cores */ +#define PRFCNT_TILER_EN 0x074 /* (RW) Performance counter enable flags for tiler */ +#define PRFCNT_MMU_L2_EN 0x07C /* (RW) Performance counter enable flags for MMU/L2 cache */ + +#define CYCLE_COUNT_LO 0x090 /* (RO) Cycle counter, low word */ +#define CYCLE_COUNT_HI 0x094 /* (RO) Cycle counter, high word */ +#define TIMESTAMP_LO 0x098 /* (RO) Global time stamp counter, low word */ +#define TIMESTAMP_HI 0x09C /* (RO) Global time stamp counter, high word */ + +#define THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ +#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ +#define THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */ +#define THREAD_FEATURES 0x0AC /* (RO) Thread features */ + +#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */ +#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */ +#define TEXTURE_FEATURES_2 0x0B8 /* (RO) Support flags for indexed texture formats 64..95 */ + +#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2)) + +#define JS0_FEATURES 0x0C0 /* (RO) Features of job slot 0 */ +#define JS1_FEATURES 0x0C4 /* (RO) Features of job slot 1 */ +#define JS2_FEATURES 0x0C8 /* (RO) Features of job slot 2 */ +#define JS3_FEATURES 0x0CC /* (RO) Features of job slot 3 */ +#define JS4_FEATURES 0x0D0 /* (RO) Features of job slot 4 */ +#define JS5_FEATURES 0x0D4 /* (RO) Features of job slot 5 */ +#define JS6_FEATURES 0x0D8 /* (RO) Features of job slot 6 */ +#define JS7_FEATURES 0x0DC /* (RO) Features of job slot 7 */ +#define JS8_FEATURES 0x0E0 /* (RO) Features of job slot 8 */ +#define JS9_FEATURES 0x0E4 /* (RO) Features of job slot 9 */ +#define JS10_FEATURES 0x0E8 /* (RO) Features of job slot 10 */ +#define JS11_FEATURES 0x0EC /* (RO) Features of job slot 11 */ +#define JS12_FEATURES 0x0F0 /* (RO) Features of job slot 12 */ +#define JS13_FEATURES 0x0F4 /* (RO) Features of job slot 13 */ +#define JS14_FEATURES 0x0F8 /* (RO) Features of job slot 14 */ +#define JS15_FEATURES 0x0FC /* (RO) Features of job slot 15 */ + +#define JS_FEATURES_REG(n) GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2)) + +#define SHADER_PRESENT_LO 0x100 /* (RO) Shader core 
present bitmap, low word */ +#define SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */ + +#define TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */ +#define TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */ + +#define L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */ +#define L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */ + +#define STACK_PRESENT_LO 0xE00 /* (RO) Core stack present bitmap, low word */ +#define STACK_PRESENT_HI 0xE04 /* (RO) Core stack present bitmap, high word */ + + +#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */ +#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */ + +#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */ +#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */ + +#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */ +#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */ + +#define STACK_READY_LO 0xE10 /* (RO) Core stack ready bitmap, low word */ +#define STACK_READY_HI 0xE14 /* (RO) Core stack ready bitmap, high word */ + + +#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */ +#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */ + +#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */ +#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */ + +#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */ +#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */ + +#define STACK_PWRON_LO 0xE20 /* (RO) Core stack power on bitmap, low word */ +#define STACK_PWRON_HI 0xE24 /* (RO) Core stack power on bitmap, high word */ + + +#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */ +#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */ + +#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */ +#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */ + +#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */ +#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */ + +#define STACK_PWROFF_LO 0xE30 /* (RO) Core stack power off bitmap, low word */ +#define STACK_PWROFF_HI 0xE34 /* (RO) Core stack power off bitmap, high word */ + + +#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */ +#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */ + +#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */ +#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */ + +#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */ +#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */ + +#define STACK_PWRTRANS_LO 0xE40 /* (RO) Core stack power transition bitmap, low word */ +#define STACK_PWRTRANS_HI 0xE44 /* (RO) Core stack power transition bitmap, high word */ + + +#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */ +#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */ + +#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */ +#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active 
bitmap, high word */ + +#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */ +#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */ + +#define COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */ +#define COHERENCY_ENABLE 0x304 /* (RW) Coherency enable */ + +#define JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */ +#define SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */ +#define TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */ +#define L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */ + +#define JOB_CONTROL_BASE 0x1000 + +#define JOB_CONTROL_REG(r) (JOB_CONTROL_BASE + (r)) + +#define JOB_IRQ_RAWSTAT 0x000 /* Raw interrupt status register */ +#define JOB_IRQ_CLEAR 0x004 /* Interrupt clear register */ +#define JOB_IRQ_MASK 0x008 /* Interrupt mask register */ +#define JOB_IRQ_STATUS 0x00C /* Interrupt status register */ +#define JOB_IRQ_JS_STATE 0x010 /* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */ +#define JOB_IRQ_THROTTLE 0x014 /* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt. */ + +#define JOB_SLOT0 0x800 /* Configuration registers for job slot 0 */ +#define JOB_SLOT1 0x880 /* Configuration registers for job slot 1 */ +#define JOB_SLOT2 0x900 /* Configuration registers for job slot 2 */ +#define JOB_SLOT3 0x980 /* Configuration registers for job slot 3 */ +#define JOB_SLOT4 0xA00 /* Configuration registers for job slot 4 */ +#define JOB_SLOT5 0xA80 /* Configuration registers for job slot 5 */ +#define JOB_SLOT6 0xB00 /* Configuration registers for job slot 6 */ +#define JOB_SLOT7 0xB80 /* Configuration registers for job slot 7 */ +#define JOB_SLOT8 0xC00 /* Configuration registers for job slot 8 */ +#define JOB_SLOT9 0xC80 /* Configuration registers for job slot 9 */ +#define JOB_SLOT10 0xD00 /* Configuration registers for job slot 10 */ +#define JOB_SLOT11 0xD80 /* Configuration registers for job slot 11 */ +#define JOB_SLOT12 0xE00 /* Configuration registers for job slot 12 */ +#define JOB_SLOT13 0xE80 /* Configuration registers for job slot 13 */ +#define JOB_SLOT14 0xF00 /* Configuration registers for job slot 14 */ +#define JOB_SLOT15 0xF80 /* Configuration registers for job slot 15 */ + +#define JOB_SLOT_REG(n, r) (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r)) + +#define JS_HEAD_LO 0x00 /* (RO) Job queue head pointer for job slot n, low word */ +#define JS_HEAD_HI 0x04 /* (RO) Job queue head pointer for job slot n, high word */ +#define JS_TAIL_LO 0x08 /* (RO) Job queue tail pointer for job slot n, low word */ +#define JS_TAIL_HI 0x0C /* (RO) Job queue tail pointer for job slot n, high word */ +#define JS_AFFINITY_LO 0x10 /* (RO) Core affinity mask for job slot n, low word */ +#define JS_AFFINITY_HI 0x14 /* (RO) Core affinity mask for job slot n, high word */ +#define JS_CONFIG 0x18 /* (RO) Configuration settings for job slot n */ +#define JS_XAFFINITY 0x1C /* (RO) Extended affinity mask for job + slot n */ + +#define JS_COMMAND 0x20 /* (WO) Command register for job slot n */ +#define JS_STATUS 0x24 /* (RO) Status register for job slot n */ + +#define JS_HEAD_NEXT_LO 0x40 /* (RW) Next job queue head pointer for job slot n, low word */ +#define JS_HEAD_NEXT_HI 0x44 /* (RW) Next job queue head pointer for job slot 
n, high word */ + +#define JS_AFFINITY_NEXT_LO 0x50 /* (RW) Next core affinity mask for job slot n, low word */ +#define JS_AFFINITY_NEXT_HI 0x54 /* (RW) Next core affinity mask for job slot n, high word */ +#define JS_CONFIG_NEXT 0x58 /* (RW) Next configuration settings for job slot n */ +#define JS_XAFFINITY_NEXT 0x5C /* (RW) Next extended affinity mask for + job slot n */ + +#define JS_COMMAND_NEXT 0x60 /* (RW) Next command register for job slot n */ + +#define JS_FLUSH_ID_NEXT 0x70 /* (RW) Next job slot n cache flush ID */ + +#define MEMORY_MANAGEMENT_BASE 0x2000 +#define MMU_REG(r) (MEMORY_MANAGEMENT_BASE + (r)) + +#define MMU_IRQ_RAWSTAT 0x000 /* (RW) Raw interrupt status register */ +#define MMU_IRQ_CLEAR 0x004 /* (WO) Interrupt clear register */ +#define MMU_IRQ_MASK 0x008 /* (RW) Interrupt mask register */ +#define MMU_IRQ_STATUS 0x00C /* (RO) Interrupt status register */ + +#define MMU_AS0 0x400 /* Configuration registers for address space 0 */ +#define MMU_AS1 0x440 /* Configuration registers for address space 1 */ +#define MMU_AS2 0x480 /* Configuration registers for address space 2 */ +#define MMU_AS3 0x4C0 /* Configuration registers for address space 3 */ +#define MMU_AS4 0x500 /* Configuration registers for address space 4 */ +#define MMU_AS5 0x540 /* Configuration registers for address space 5 */ +#define MMU_AS6 0x580 /* Configuration registers for address space 6 */ +#define MMU_AS7 0x5C0 /* Configuration registers for address space 7 */ +#define MMU_AS8 0x600 /* Configuration registers for address space 8 */ +#define MMU_AS9 0x640 /* Configuration registers for address space 9 */ +#define MMU_AS10 0x680 /* Configuration registers for address space 10 */ +#define MMU_AS11 0x6C0 /* Configuration registers for address space 11 */ +#define MMU_AS12 0x700 /* Configuration registers for address space 12 */ +#define MMU_AS13 0x740 /* Configuration registers for address space 13 */ +#define MMU_AS14 0x780 /* Configuration registers for address space 14 */ +#define MMU_AS15 0x7C0 /* Configuration registers for address space 15 */ + +#define MMU_AS_REG(n, r) (MMU_REG(MMU_AS0 + ((n) << 6)) + (r)) + +#define AS_TRANSTAB_LO 0x00 /* (RW) Translation Table Base Address for address space n, low word */ +#define AS_TRANSTAB_HI 0x04 /* (RW) Translation Table Base Address for address space n, high word */ +#define AS_MEMATTR_LO 0x08 /* (RW) Memory attributes for address space n, low word. */ +#define AS_MEMATTR_HI 0x0C /* (RW) Memory attributes for address space n, high word. 
*/ +#define AS_LOCKADDR_LO 0x10 /* (RW) Lock region address for address space n, low word */ +#define AS_LOCKADDR_HI 0x14 /* (RW) Lock region address for address space n, high word */ +#define AS_COMMAND 0x18 /* (WO) MMU command register for address space n */ +#define AS_FAULTSTATUS 0x1C /* (RO) MMU fault status register for address space n */ +#define AS_FAULTADDRESS_LO 0x20 /* (RO) Fault Address for address space n, low word */ +#define AS_FAULTADDRESS_HI 0x24 /* (RO) Fault Address for address space n, high word */ +#define AS_STATUS 0x28 /* (RO) Status flags for address space n */ + + +/* (RW) Translation table configuration for address space n, low word */ +#define AS_TRANSCFG_LO 0x30 +/* (RW) Translation table configuration for address space n, high word */ +#define AS_TRANSCFG_HI 0x34 +/* (RO) Secondary fault address for address space n, low word */ +#define AS_FAULTEXTRA_LO 0x38 +/* (RO) Secondary fault address for address space n, high word */ +#define AS_FAULTEXTRA_HI 0x3C + +/* End Register Offsets */ + +/* + * MMU_IRQ_RAWSTAT register values. Values are valid also for + MMU_IRQ_CLEAR, MMU_IRQ_MASK, MMU_IRQ_STATUS registers. + */ + +#define MMU_PAGE_FAULT_FLAGS 16 + +/* Macros returning a bitmask to retrieve page fault or bus error flags from + * MMU registers */ +#define MMU_PAGE_FAULT(n) (1UL << (n)) +#define MMU_BUS_ERROR(n) (1UL << ((n) + MMU_PAGE_FAULT_FLAGS)) + +/* + * Begin LPAE MMU TRANSTAB register values + */ +#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffff000 +#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED (0u << 0) +#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY (1u << 1) +#define AS_TRANSTAB_LPAE_ADRMODE_TABLE (3u << 0) +#define AS_TRANSTAB_LPAE_READ_INNER (1u << 2) +#define AS_TRANSTAB_LPAE_SHARE_OUTER (1u << 4) + +#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x00000003 + +/* + * Begin AARCH64 MMU TRANSTAB register values + */ +#define MMU_HW_OUTA_BITS 40 +#define AS_TRANSTAB_BASE_MASK ((1ULL << MMU_HW_OUTA_BITS) - (1ULL << 4)) + +/* + * Begin MMU STATUS register values + */ +#define AS_STATUS_AS_ACTIVE 0x01 + +#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK (0x7<<3) +#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT (0x0<<3) +#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT (0x1<<3) +#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT (0x2<<3) +#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG (0x3<<3) + +#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT (0x4<<3) +#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT (0x5<<3) + +#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3<<8) +#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0<<8) +#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1<<8) +#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2<<8) +#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3<<8) + +/* + * Begin MMU TRANSCFG register values + */ + +#define AS_TRANSCFG_ADRMODE_LEGACY 0 +#define AS_TRANSCFG_ADRMODE_UNMAPPED 1 +#define AS_TRANSCFG_ADRMODE_IDENTITY 2 +#define AS_TRANSCFG_ADRMODE_AARCH64_4K 6 +#define AS_TRANSCFG_ADRMODE_AARCH64_64K 8 + +#define AS_TRANSCFG_ADRMODE_MASK 0xF + + +/* + * Begin TRANSCFG register values + */ +#define AS_TRANSCFG_PTW_MEMATTR_MASK (3 << 24) +#define AS_TRANSCFG_PTW_MEMATTR_NON_CACHEABLE (1 << 24) +#define AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK (2 << 24) + +#define AS_TRANSCFG_PTW_SH_MASK ((3 << 28)) +#define AS_TRANSCFG_PTW_SH_OS (2 << 28) +#define AS_TRANSCFG_PTW_SH_IS (3 << 28) + +/* + * Begin Command Values + */ + +/* JS_COMMAND register commands */ +#define JS_COMMAND_NOP 0x00 /* NOP Operation. 
Writing this value is ignored */ +#define JS_COMMAND_START 0x01 /* Start processing a job chain. Writing this value is ignored */ +#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */ +#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */ +#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */ +#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */ +#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */ +#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */ + +#define JS_COMMAND_MASK 0x07 /* Mask of bits currently in use by the HW */ + +/* AS_COMMAND register commands */ +#define AS_COMMAND_NOP 0x00 /* NOP Operation */ +#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */ +#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */ +#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */ +#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs + (deprecated - only for use with T60x) */ +#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */ +#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then + flush all L2 caches then issue a flush region command to all MMUs */ + +/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */ +#define JS_CONFIG_START_FLUSH_NO_ACTION (0u << 0) +#define JS_CONFIG_START_FLUSH_CLEAN (1u << 8) +#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8) +#define JS_CONFIG_START_MMU (1u << 10) +#define JS_CONFIG_JOB_CHAIN_FLAG (1u << 11) +#define JS_CONFIG_END_FLUSH_NO_ACTION JS_CONFIG_START_FLUSH_NO_ACTION +#define JS_CONFIG_END_FLUSH_CLEAN (1u << 12) +#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12) +#define JS_CONFIG_ENABLE_FLUSH_REDUCTION (1u << 14) +#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK (1u << 15) +#define JS_CONFIG_THREAD_PRI(n) ((n) << 16) + +/* JS_XAFFINITY register values */ +#define JS_XAFFINITY_XAFFINITY_ENABLE (1u << 0) +#define JS_XAFFINITY_TILER_ENABLE (1u << 8) +#define JS_XAFFINITY_CACHE_ENABLE (1u << 16) + +/* JS_STATUS register values */ + +/* NOTE: Please keep this values in sync with enum base_jd_event_code in mali_base_kernel.h. + * The values are separated to avoid dependency of userspace and kernel code. 
+ */ + +/* Group of values representing the job status insead a particular fault */ +#define JS_STATUS_NO_EXCEPTION_BASE 0x00 +#define JS_STATUS_INTERRUPTED (JS_STATUS_NO_EXCEPTION_BASE + 0x02) /* 0x02 means INTERRUPTED */ +#define JS_STATUS_STOPPED (JS_STATUS_NO_EXCEPTION_BASE + 0x03) /* 0x03 means STOPPED */ +#define JS_STATUS_TERMINATED (JS_STATUS_NO_EXCEPTION_BASE + 0x04) /* 0x04 means TERMINATED */ + +/* General fault values */ +#define JS_STATUS_FAULT_BASE 0x40 +#define JS_STATUS_CONFIG_FAULT (JS_STATUS_FAULT_BASE) /* 0x40 means CONFIG FAULT */ +#define JS_STATUS_POWER_FAULT (JS_STATUS_FAULT_BASE + 0x01) /* 0x41 means POWER FAULT */ +#define JS_STATUS_READ_FAULT (JS_STATUS_FAULT_BASE + 0x02) /* 0x42 means READ FAULT */ +#define JS_STATUS_WRITE_FAULT (JS_STATUS_FAULT_BASE + 0x03) /* 0x43 means WRITE FAULT */ +#define JS_STATUS_AFFINITY_FAULT (JS_STATUS_FAULT_BASE + 0x04) /* 0x44 means AFFINITY FAULT */ +#define JS_STATUS_BUS_FAULT (JS_STATUS_FAULT_BASE + 0x08) /* 0x48 means BUS FAULT */ + +/* Instruction or data faults */ +#define JS_STATUS_INSTRUCTION_FAULT_BASE 0x50 +#define JS_STATUS_INSTR_INVALID_PC (JS_STATUS_INSTRUCTION_FAULT_BASE) /* 0x50 means INSTR INVALID PC */ +#define JS_STATUS_INSTR_INVALID_ENC (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01) /* 0x51 means INSTR INVALID ENC */ +#define JS_STATUS_INSTR_TYPE_MISMATCH (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02) /* 0x52 means INSTR TYPE MISMATCH */ +#define JS_STATUS_INSTR_OPERAND_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03) /* 0x53 means INSTR OPERAND FAULT */ +#define JS_STATUS_INSTR_TLS_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04) /* 0x54 means INSTR TLS FAULT */ +#define JS_STATUS_INSTR_BARRIER_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05) /* 0x55 means INSTR BARRIER FAULT */ +#define JS_STATUS_INSTR_ALIGN_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06) /* 0x56 means INSTR ALIGN FAULT */ +/* NOTE: No fault with 0x57 code defined in spec. */ +#define JS_STATUS_DATA_INVALID_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08) /* 0x58 means DATA INVALID FAULT */ +#define JS_STATUS_TILE_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09) /* 0x59 means TILE RANGE FAULT */ +#define JS_STATUS_ADDRESS_RANGE_FAULT (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A) /* 0x5A means ADDRESS RANGE FAULT */ + +/* Other faults */ +#define JS_STATUS_MEMORY_FAULT_BASE 0x60 +#define JS_STATUS_OUT_OF_MEMORY (JS_STATUS_MEMORY_FAULT_BASE) /* 0x60 means OUT OF MEMORY */ +#define JS_STATUS_UNKNOWN 0x7F /* 0x7F means UNKNOWN */ + +/* GPU_COMMAND values */ +#define GPU_COMMAND_NOP 0x00 /* No operation, nothing happens */ +#define GPU_COMMAND_SOFT_RESET 0x01 /* Stop all external bus interfaces, and then reset the entire GPU. */ +#define GPU_COMMAND_HARD_RESET 0x02 /* Immediately reset the entire GPU. */ +#define GPU_COMMAND_PRFCNT_CLEAR 0x03 /* Clear all performance counters, setting them all to zero. 
*/ +#define GPU_COMMAND_PRFCNT_SAMPLE 0x04 /* Sample all performance counters, writing them out to memory */ +#define GPU_COMMAND_CYCLE_COUNT_START 0x05 /* Starts the cycle counter, and system timestamp propagation */ +#define GPU_COMMAND_CYCLE_COUNT_STOP 0x06 /* Stops the cycle counter, and system timestamp propagation */ +#define GPU_COMMAND_CLEAN_CACHES 0x07 /* Clean all caches */ +#define GPU_COMMAND_CLEAN_INV_CACHES 0x08 /* Clean and invalidate all caches */ +#define GPU_COMMAND_SET_PROTECTED_MODE 0x09 /* Places the GPU in protected mode */ + +/* End Command Values */ + +/* GPU_STATUS values */ +#define GPU_STATUS_PRFCNT_ACTIVE (1 << 2) /* Set if the performance counters are active. */ +#define GPU_STATUS_PROTECTED_MODE_ACTIVE (1 << 7) /* Set if protected mode is active */ + +/* PRFCNT_CONFIG register values */ +#define PRFCNT_CONFIG_MODE_SHIFT 0 /* Counter mode position. */ +#define PRFCNT_CONFIG_AS_SHIFT 4 /* Address space bitmap position. */ +#define PRFCNT_CONFIG_SETSELECT_SHIFT 8 /* Set select position. */ + +#define PRFCNT_CONFIG_MODE_OFF 0 /* The performance counters are disabled. */ +#define PRFCNT_CONFIG_MODE_MANUAL 1 /* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */ +#define PRFCNT_CONFIG_MODE_TILE 2 /* The performance counters are enabled, and are written out each time a tile finishes rendering. */ + +/* AS_MEMATTR values: */ +/* Use GPU implementation-defined caching policy. */ +#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull +/* The attribute set to force all resources to be cached. */ +#define AS_MEMATTR_FORCE_TO_CACHE_ALL 0x8Full +/* Inner write-alloc cache setup, no outer caching */ +#define AS_MEMATTR_WRITE_ALLOC 0x8Dull + +/* Set to implementation defined, outer caching */ +#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull +/* Set to write back memory, outer caching */ +#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull + +/* Use GPU implementation-defined caching policy. */ +#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull +/* The attribute set to force all resources to be cached. 
*/ +#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL 0x4Full +/* Inner write-alloc cache setup, no outer caching */ +#define AS_MEMATTR_LPAE_WRITE_ALLOC 0x4Dull +/* Set to implementation defined, outer caching */ +#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF 0x88ull +/* Set to write back memory, outer caching */ +#define AS_MEMATTR_LPAE_OUTER_WA 0x8Dull + +/* Symbols for default MEMATTR to use + * Default is - HW implementation defined caching */ +#define AS_MEMATTR_INDEX_DEFAULT 0 +#define AS_MEMATTR_INDEX_DEFAULT_ACE 3 + +/* HW implementation defined caching */ +#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0 +/* Force cache on */ +#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1 +/* Write-alloc */ +#define AS_MEMATTR_INDEX_WRITE_ALLOC 2 +/* Outer coherent, inner implementation defined policy */ +#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3 +/* Outer coherent, write alloc inner */ +#define AS_MEMATTR_INDEX_OUTER_WA 4 + +/* JS_FEATURES register */ + +#define JS_FEATURE_NULL_JOB (1u << 1) +#define JS_FEATURE_SET_VALUE_JOB (1u << 2) +#define JS_FEATURE_CACHE_FLUSH_JOB (1u << 3) +#define JS_FEATURE_COMPUTE_JOB (1u << 4) +#define JS_FEATURE_VERTEX_JOB (1u << 5) +#define JS_FEATURE_GEOMETRY_JOB (1u << 6) +#define JS_FEATURE_TILER_JOB (1u << 7) +#define JS_FEATURE_FUSED_JOB (1u << 8) +#define JS_FEATURE_FRAGMENT_JOB (1u << 9) + +/* End JS_FEATURES register */ + +/* L2_MMU_CONFIG register */ +#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT (23) +#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT (24) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) + +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT (26) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) +#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) + +#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT (12) +#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT) + +#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT (15) +#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT) + +/* End L2_MMU_CONFIG register */ + +/* THREAD_* registers */ + +/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */ +#define IMPLEMENTATION_UNSPECIFIED 0 +#define IMPLEMENTATION_SILICON 1 +#define IMPLEMENTATION_FPGA 2 +#define IMPLEMENTATION_MODEL 3 + +/* Default values when registers are not supported by the implemented hardware */ +#define THREAD_MT_DEFAULT 256 +#define THREAD_MWS_DEFAULT 256 +#define THREAD_MBS_DEFAULT 256 +#define THREAD_MR_DEFAULT 1024 +#define THREAD_MTQ_DEFAULT 4 +#define THREAD_MTGS_DEFAULT 10 + +/* End THREAD_* registers */ + +/* SHADER_CONFIG register */ + +#define SC_ALT_COUNTERS (1ul << 3) +#define SC_OVERRIDE_FWD_PIXEL_KILL (1ul << 4) +#define SC_SDC_DISABLE_OQ_DISCARD (1ul << 6) +#define SC_LS_ALLOW_ATTR_TYPES (1ul << 16) 
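
As a minimal sketch of how the L2_MMU_CONFIG limit fields above are meant to be used: the register is read/write and the limits are multi-bit fields, so the usual pattern is a read-modify-write of the masked field. The code below is illustrative only and is not part of this patch; it assumes gpu_base is the ioremap()ed base of the GPU register space, so that GPU control registers such as L2_MMU_CONFIG (offset 0xF0C) can be addressed at their raw offsets.

#include <linux/io.h>		/* readl()/writel() */
#include <linux/types.h>	/* u32 */

static void example_limit_l2_external_reads(void __iomem *gpu_base)
{
	u32 cfg;

	/* Clear the two-bit LIMIT_EXTERNAL_READS field, then set the
	 * desired encoding (here: limit external reads to a quarter). */
	cfg = readl(gpu_base + L2_MMU_CONFIG);
	cfg &= ~L2_MMU_CONFIG_LIMIT_EXTERNAL_READS;
	cfg |= L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER;
	writel(cfg, gpu_base + L2_MMU_CONFIG);
}

The external-writes limit is programmed the same way with the corresponding *_WRITES masks.
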
+#define SC_LS_PAUSEBUFFER_DISABLE (1ul << 16) +#define SC_TLS_HASH_ENABLE (1ul << 17) +#define SC_LS_ATTR_CHECK_DISABLE (1ul << 18) +#define SC_ENABLE_TEXGRD_FLAGS (1ul << 25) +/* End SHADER_CONFIG register */ + +/* TILER_CONFIG register */ + +#define TC_CLOCK_GATE_OVERRIDE (1ul << 0) + +/* End TILER_CONFIG register */ + +/* JM_CONFIG register */ + +#define JM_TIMESTAMP_OVERRIDE (1ul << 0) +#define JM_CLOCK_GATE_OVERRIDE (1ul << 1) +#define JM_JOB_THROTTLE_ENABLE (1ul << 2) +#define JM_JOB_THROTTLE_LIMIT_SHIFT (3) +#define JM_MAX_JOB_THROTTLE_LIMIT (0x3F) +#define JM_FORCE_COHERENCY_FEATURES_SHIFT (2) +#define JM_IDVS_GROUP_SIZE_SHIFT (16) +#define JM_MAX_IDVS_GROUP_SIZE (0x3F) +/* End JM_CONFIG register */ + + +#endif /* _MIDGARD_REGMAP_H_ */ diff --git a/drivers/gpu/arm/midgard/mali_timeline.h b/drivers/gpu/arm/midgard/mali_timeline.h new file mode 100644 index 00000000000000..bd5f6614b6bb08 --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_timeline.h @@ -0,0 +1,396 @@ +/* + * + * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mali_timeline + +#if !defined(_MALI_TIMELINE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _MALI_TIMELINE_H + +#include + +TRACE_EVENT(mali_timeline_atoms_in_flight, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int tgid, + int count), + + TP_ARGS(ts_sec, + ts_nsec, + tgid, + count), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, tgid) + __field(int, count) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->tgid = tgid; + __entry->count = count; + ), + + TP_printk("%i,%i.%.9i,%i,%i", CTX_SET_NR_ATOMS_IN_FLIGHT, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->tgid, + __entry->count) +); + + +TRACE_EVENT(mali_timeline_atom, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int tgid, + int atom_id), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + tgid, + atom_id), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, tgid) + __field(int, atom_id) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->tgid = tgid; + __entry->atom_id = atom_id; + ), + + TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->tgid, + __entry->atom_id, + __entry->atom_id) +); + +TRACE_EVENT(mali_timeline_gpu_slot_active, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int tgid, + int js, + int count), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + tgid, + js, + count), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, tgid) + __field(int, js) + __field(int, count) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->tgid = tgid; + __entry->js = js; + __entry->count = count; + ), + + TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, + 
(int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->tgid, + __entry->js, + __entry->count) +); + +TRACE_EVENT(mali_timeline_gpu_slot_action, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int tgid, + int js, + int count), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + tgid, + js, + count), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, tgid) + __field(int, js) + __field(int, count) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->tgid = tgid; + __entry->js = js; + __entry->count = count; + ), + + TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->tgid, + __entry->js, + __entry->count) +); + +TRACE_EVENT(mali_timeline_gpu_power_active, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int active), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + active), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, active) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->active = active; + ), + + TP_printk("%i,%i.%.9i,0,%i", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->active) + +); + +TRACE_EVENT(mali_timeline_l2_power_active, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int state), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + state), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, state) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->state = state; + ), + + TP_printk("%i,%i.%.9i,0,%i", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->state) + +); +TRACE_EVENT(mali_timeline_pm_event, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int pm_event_type, + unsigned int pm_event_id), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + pm_event_type, + pm_event_id), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, pm_event_type) + __field(unsigned int, pm_event_id) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->pm_event_type = pm_event_type; + __entry->pm_event_id = pm_event_id; + ), + + TP_printk("%i,%i.%.9i,0,%i,%u", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->pm_event_type, __entry->pm_event_id) + +); + +TRACE_EVENT(mali_timeline_slot_atom, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int event_type, + int tgid, + int js, + int atom_id), + + TP_ARGS(ts_sec, + ts_nsec, + event_type, + tgid, + js, + atom_id), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, event_type) + __field(int, tgid) + __field(int, js) + __field(int, atom_id) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->event_type = event_type; + __entry->tgid = tgid; + __entry->js = js; + __entry->atom_id = atom_id; + ), + + TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->tgid, + __entry->js, + __entry->atom_id) +); + +TRACE_EVENT(mali_timeline_pm_checktrans, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int trans_code, + int 
trans_id), + + TP_ARGS(ts_sec, + ts_nsec, + trans_code, + trans_id), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, trans_code) + __field(int, trans_id) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->trans_code = trans_code; + __entry->trans_id = trans_id; + ), + + TP_printk("%i,%i.%.9i,0,%i", __entry->trans_code, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->trans_id) + +); + +TRACE_EVENT(mali_timeline_context_active, + + TP_PROTO(u64 ts_sec, + u32 ts_nsec, + int count), + + TP_ARGS(ts_sec, + ts_nsec, + count), + + TP_STRUCT__entry( + __field(u64, ts_sec) + __field(u32, ts_nsec) + __field(int, count) + ), + + TP_fast_assign( + __entry->ts_sec = ts_sec; + __entry->ts_nsec = ts_nsec; + __entry->count = count; + ), + + TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE, + (int)__entry->ts_sec, + (int)__entry->ts_nsec, + __entry->count) +); + +#endif /* _MALI_TIMELINE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +/* This part must be outside protection */ +#include + diff --git a/drivers/gpu/arm/midgard/mali_uk.h b/drivers/gpu/arm/midgard/mali_uk.h new file mode 100644 index 00000000000000..841d03fb5873bf --- /dev/null +++ b/drivers/gpu/arm/midgard/mali_uk.h @@ -0,0 +1,141 @@ +/* + * + * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +/** + * @file mali_uk.h + * Types and definitions that are common across OSs for both the user + * and kernel side of the User-Kernel interface. + */ + +#ifndef _UK_H_ +#define _UK_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @addtogroup base_api + * @{ + */ + +/** + * @defgroup uk_api User-Kernel Interface API + * + * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device + * drivers developed as part of the Midgard DDK. Currently that includes the Base driver and the UMP driver. + * + * It exposes an OS independent API to user-side code (UKU) which routes functions calls to an OS-independent + * kernel-side API (UKK) via an OS-specific communication mechanism. + * + * This API is internal to the Midgard DDK and is not exposed to any applications. + * + * @{ + */ + +/** + * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The + * UK module maps this to an OS specific device name, e.g. "gpu_base" -> "GPU0:". Specify this + * identifier to select a UKK client to the uku_open() function. + * + * When a new UKK client driver is created a new identifier needs to be added to the uk_client_id + * enumeration and the uku_open() implemenation for the various OS ports need to be updated to + * provide a mapping of the identifier to the OS specific device name. + * + */ +enum uk_client_id { + /** + * Value used to identify the Base driver UK client. + */ + UK_CLIENT_MALI_T600_BASE, + + /** The number of uk clients supported. 
This must be the last member of the enum */ + UK_CLIENT_COUNT +}; + +/** + * Each function callable through the UK interface has a unique number. + * Functions provided by UK clients start from number UK_FUNC_ID. + * Numbers below UK_FUNC_ID are used for internal UK functions. + */ +enum uk_func { + UKP_FUNC_ID_CHECK_VERSION, /**< UKK Core internal function */ + /** + * Each UK client numbers the functions they provide starting from + * number UK_FUNC_ID. This number is then eventually assigned to the + * id field of the union uk_header structure when preparing to make a + * UK call. See your UK client for a list of their function numbers. + */ + UK_FUNC_ID = 512 +}; + +/** + * Arguments for a UK call are stored in a structure. This structure consists + * of a fixed size header and a payload. The header carries a 32-bit number + * identifying the UK function to be called (see uk_func). When the UKK client + * receives this header and executed the requested UK function, it will use + * the same header to store the result of the function in the form of a + * int return code. The size of this structure is such that the + * first member of the payload following the header can be accessed efficiently + * on a 32 and 64-bit kernel and the structure has the same size regardless + * of a 32 or 64-bit kernel. The uk_kernel_size_type type should be defined + * accordingly in the OS specific mali_uk_os.h header file. + */ +union uk_header { + /** + * 32-bit number identifying the UK function to be called. + * Also see uk_func. + */ + u32 id; + /** + * The int return code returned by the called UK function. + * See the specification of the particular UK function you are + * calling for the meaning of the error codes returned. All + * UK functions return 0 on success. + */ + u32 ret; + /* + * Used to ensure 64-bit alignment of this union. Do not remove. + * This field is used for padding and does not need to be initialized. + */ + u64 sizer; +}; + +/** + * This structure carries a 16-bit major and minor number and is sent along with an internal UK call + * used during uku_open to identify the versions of the UK module in use by the user-side and kernel-side. + */ +struct uku_version_check_args { + union uk_header header; + /**< UK call header */ + u16 major; + /**< This field carries the user-side major version on input and the kernel-side major version on output */ + u16 minor; + /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */ + u8 padding[4]; +}; + +/** @} end group uk_api */ + +/** @} *//* end group base_api */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _UK_H_ */ diff --git a/drivers/gpu/arm/midgard/platform/Kconfig b/drivers/gpu/arm/midgard/platform/Kconfig new file mode 100644 index 00000000000000..38835d3d1531c7 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/Kconfig @@ -0,0 +1,24 @@ +# +# (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# + + + + +# Add your platform specific Kconfig file here +# +# "drivers/gpu/arm/midgard/platform/xxx/Kconfig" +# +# Where xxx is the platform name is the name set in MALI_PLATFORM_NAME +# + diff --git a/drivers/gpu/arm/midgard/platform/devicetree/Kbuild b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild new file mode 100644 index 00000000000000..d40d7982ff049c --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild @@ -0,0 +1,18 @@ +# +# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +mali_kbase-y += \ + $(MALI_PLATFORM_DIR)/mali_kbase_config_devicetree.o \ + $(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c new file mode 100644 index 00000000000000..299d0e75fc0304 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c @@ -0,0 +1,42 @@ +/* + * + * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include + +int kbase_platform_early_init(void) +{ + /* Nothing needed at this stage */ + return 0; +} + +static struct kbase_platform_config dummy_platform_config; + +struct kbase_platform_config *kbase_get_platform_config(void) +{ + return &dummy_platform_config; +} + +#ifndef CONFIG_OF +int kbase_platform_register(void) +{ + return 0; +} + +void kbase_platform_unregister(void) +{ +} +#endif diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h new file mode 100644 index 00000000000000..2ceca34945b956 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h @@ -0,0 +1,80 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * Maximum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MAX (5000) +/** + * Minimum frequency GPU will be clocked at. Given in kHz. 
+ * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MIN (5000) + +/** + * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock + * + * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_cpu_clk_speed_func. + * Default Value: NA + */ +#define CPU_SPEED_FUNC (NULL) + +/** + * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock + * + * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_gpu_clk_speed_func. + * Default Value: NA + */ +#define GPU_SPEED_FUNC (NULL) + +/** + * Power management configuration + * + * Attached value: pointer to @ref kbase_pm_callback_conf + * Default value: See @ref kbase_pm_callback_conf + */ +#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) + +/** + * Platform specific configuration functions + * + * Attached value: pointer to @ref kbase_platform_funcs_conf + * Default value: See @ref kbase_platform_funcs_conf + */ +#define PLATFORM_FUNCS (NULL) + +extern struct kbase_pm_callback_conf pm_callbacks; + +/** + * Autosuspend delay + * + * The delay time (in milliseconds) to be used for autosuspend + */ +#define AUTO_SUSPEND_DELAY (100) diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c new file mode 100644 index 00000000000000..372420aec6d91c --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c @@ -0,0 +1,123 @@ +/* + * + * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include "mali_kbase_config_platform.h" + +static int pm_callback_power_on(struct kbase_device *kbdev) +{ + int ret = 1; /* Assume GPU has been powered off */ + int error; + + dev_dbg(kbdev->dev, "pm_callback_power_on %p\n", + (void *)kbdev->dev->pm_domain); + + error = pm_runtime_get_sync(kbdev->dev); + if (error == 1) { + /* + * Let core know that the chip has not been + * powered off, so we can save on re-initialization. 
+ */ + ret = 0; + } + + dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error); + + return ret; +} + +static void pm_callback_power_off(struct kbase_device *kbdev) +{ + dev_dbg(kbdev->dev, "pm_callback_power_off\n"); + + pm_runtime_mark_last_busy(kbdev->dev); + pm_runtime_put_autosuspend(kbdev->dev); +} + +#ifdef KBASE_PM_RUNTIME +static int kbase_device_runtime_init(struct kbase_device *kbdev) +{ + int ret = 0; + + dev_dbg(kbdev->dev, "kbase_device_runtime_init\n"); + + pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY); + pm_runtime_use_autosuspend(kbdev->dev); + + pm_runtime_set_active(kbdev->dev); + pm_runtime_enable(kbdev->dev); + + if (!pm_runtime_enabled(kbdev->dev)) { + dev_warn(kbdev->dev, "pm_runtime not enabled"); + ret = -ENOSYS; + } + + return ret; +} + +static void kbase_device_runtime_disable(struct kbase_device *kbdev) +{ + dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n"); + pm_runtime_disable(kbdev->dev); +} +#endif + +static int pm_callback_runtime_on(struct kbase_device *kbdev) +{ + dev_dbg(kbdev->dev, "pm_callback_runtime_on\n"); + + return 0; +} + +static void pm_callback_runtime_off(struct kbase_device *kbdev) +{ + dev_dbg(kbdev->dev, "pm_callback_runtime_off\n"); +} + +static void pm_callback_resume(struct kbase_device *kbdev) +{ + int ret = pm_callback_runtime_on(kbdev); + + WARN_ON(ret); +} + +static void pm_callback_suspend(struct kbase_device *kbdev) +{ + pm_callback_runtime_off(kbdev); +} + +struct kbase_pm_callback_conf pm_callbacks = { + .power_on_callback = pm_callback_power_on, + .power_off_callback = pm_callback_power_off, + .power_suspend_callback = pm_callback_suspend, + .power_resume_callback = pm_callback_resume, +#ifdef KBASE_PM_RUNTIME + .power_runtime_init_callback = kbase_device_runtime_init, + .power_runtime_term_callback = kbase_device_runtime_disable, + .power_runtime_on_callback = pm_callback_runtime_on, + .power_runtime_off_callback = pm_callback_runtime_off, +#else /* KBASE_PM_RUNTIME */ + .power_runtime_init_callback = NULL, + .power_runtime_term_callback = NULL, + .power_runtime_on_callback = NULL, + .power_runtime_off_callback = NULL, +#endif /* KBASE_PM_RUNTIME */ +}; + + diff --git a/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h b/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h new file mode 100644 index 00000000000000..7cb3be7f78cef3 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/mali_kbase_platform_common.h @@ -0,0 +1,26 @@ +/* + * + * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * @brief Entry point to transfer control to a platform for early initialization + * + * This function is called early on in the initialization during execution of + * @ref kbase_driver_init. + * + * @return Zero to indicate success non-zero for failure. 
+ */ +int kbase_platform_early_init(void); diff --git a/drivers/gpu/arm/midgard/platform/vexpress/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild new file mode 100644 index 00000000000000..d9d5e90852317c --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild @@ -0,0 +1,19 @@ +# +# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +mali_kbase-y += \ + $(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \ + $(MALI_PLATFORM_DIR)/mali_kbase_cpu_vexpress.o \ + mali_kbase_platform_fake.o diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h new file mode 100644 index 00000000000000..02835f129aa33a --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h @@ -0,0 +1,75 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include "mali_kbase_cpu_vexpress.h" + +/** + * Maximum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MAX kbase_get_platform_max_freq() +/** + * Minimum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MIN kbase_get_platform_min_freq() + +/** + * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock + * + * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_cpu_clk_speed_func. + * Default Value: NA + */ +#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed) + +/** + * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock + * + * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_gpu_clk_speed_func. 
+ * Default Value: NA + */ +#define GPU_SPEED_FUNC (NULL) + +/** + * Power management configuration + * + * Attached value: pointer to @ref kbase_pm_callback_conf + * Default value: See @ref kbase_pm_callback_conf + */ +#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) + +/** + * Platform specific configuration functions + * + * Attached value: pointer to @ref kbase_platform_funcs_conf + * Default value: See @ref kbase_platform_funcs_conf + */ +#define PLATFORM_FUNCS (NULL) + +extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c new file mode 100644 index 00000000000000..745884f9a22d87 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c @@ -0,0 +1,72 @@ +/* + * + * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include +#include +#include +#include +#include "mali_kbase_cpu_vexpress.h" +#include "mali_kbase_config_platform.h" + +#ifndef CONFIG_OF +static struct kbase_io_resources io_resources = { + .job_irq_number = 68, + .mmu_irq_number = 69, + .gpu_irq_number = 70, + .io_memory_region = { + .start = 0xFC010000, + .end = 0xFC010000 + (4096 * 4) - 1 + } +}; +#endif /* CONFIG_OF */ + +static int pm_callback_power_on(struct kbase_device *kbdev) +{ + /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ + return 1; +} + +static void pm_callback_power_off(struct kbase_device *kbdev) +{ +} + +struct kbase_pm_callback_conf pm_callbacks = { + .power_on_callback = pm_callback_power_on, + .power_off_callback = pm_callback_power_off, + .power_suspend_callback = NULL, + .power_resume_callback = NULL +}; + +static struct kbase_platform_config versatile_platform_config = { +#ifndef CONFIG_OF + .io_resources = &io_resources +#endif +}; + +struct kbase_platform_config *kbase_get_platform_config(void) +{ + return &versatile_platform_config; +} + + +int kbase_platform_early_init(void) +{ + /* Nothing needed at this stage */ + return 0; +} diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c new file mode 100644 index 00000000000000..4665f98cbbe488 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c @@ -0,0 +1,279 @@ +/* + * + * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +#include +#include +#include "mali_kbase_cpu_vexpress.h" + +#define HZ_IN_MHZ (1000000) + +#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000) +#define MOTHERBOARD_SYS_CFG_START (0x10000000) +#define SYS_CFGDATA_OFFSET (0x000000A0) +#define SYS_CFGCTRL_OFFSET (0x000000A4) +#define SYS_CFGSTAT_OFFSET (0x000000A8) + +#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31) +#define READ_REG_BIT_VALUE (0 << 30) +#define DCC_DEFAULT_BIT_VALUE (0 << 26) +#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20) +#define SITE_DEFAULT_BIT_VALUE (1 << 16) +#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12) +#define DEVICE_DEFAULT_BIT_VALUE (2 << 0) +#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0) +#define SYS_CFG_ERROR_BIT_VALUE (1 << 1) + +#define FEED_REG_BIT_MASK (0x0F) +#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03) +#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07) +#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B) +#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F) +#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13) + +/* the following three values used for reading + * HBI value of the LogicTile daughterboard */ +#define VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 (0x10000000) +#define VE_SYS_PROC_ID1_OFFSET (0x00000088) +#define VE_LOGIC_TILE_HBI_MASK (0x00000FFF) + +#define IS_SINGLE_BIT_SET(val, pos) (val&(1<> + FCLK_PA_DIVIDE_BIT_SHIFT); + /* CFGRW0[10:7] */ + pb_divide = ((reg_val & (FEED_REG_BIT_MASK << + FCLK_PB_DIVIDE_BIT_SHIFT)) >> + FCLK_PB_DIVIDE_BIT_SHIFT); + *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1); + } else if (IS_SINGLE_BIT_SET(reg_val, 1)) { + /* CFGRW0[1] - CLKOC */ + /* CFGRW0[6:3] */ + pa_divide = ((reg_val & (FEED_REG_BIT_MASK << + FCLK_PA_DIVIDE_BIT_SHIFT)) >> + FCLK_PA_DIVIDE_BIT_SHIFT); + /* CFGRW0[14:11] */ + pc_divide = ((reg_val & (FEED_REG_BIT_MASK << + FCLK_PC_DIVIDE_BIT_SHIFT)) >> + FCLK_PC_DIVIDE_BIT_SHIFT); + *cpu_clock = osc2_value * (pa_divide + 1) / (pc_divide + 1); + } else if (IS_SINGLE_BIT_SET(reg_val, 2)) { + /* CFGRW0[2] - FACLK */ + /* CFGRW0[18:15] */ + pa_divide = ((reg_val & (FEED_REG_BIT_MASK << + AXICLK_PA_DIVIDE_BIT_SHIFT)) >> + AXICLK_PA_DIVIDE_BIT_SHIFT); + /* CFGRW0[22:19] */ + pb_divide = ((reg_val & (FEED_REG_BIT_MASK << + AXICLK_PB_DIVIDE_BIT_SHIFT)) >> + AXICLK_PB_DIVIDE_BIT_SHIFT); + *cpu_clock = osc2_value * (pa_divide + 1) / (pb_divide + 1); + } else { + err = -EIO; + } + +set_reg_error: +ongoing_request: + raw_spin_unlock(&syscfg_lock); + *cpu_clock /= HZ_IN_MHZ; + + if (!err) + cpu_clock_speed = *cpu_clock; + + iounmap(scc_reg); + +scc_reg_map_failed: + iounmap(syscfg_reg); + +syscfg_reg_map_failed: + + return err; +} + +/** + * kbase_get_platform_logic_tile_type - determines which LogicTile type + * is used by Versatile Express + * + * When platform_config build parameter is specified as vexpress, i.e., + * platform_config=vexpress, GPU frequency may vary dependent on the + * particular platform. The GPU frequency depends on the LogicTile type. 
+ * + * This function determines which LogicTile type is used by the platform by + * reading the HBI value of the daughterboard which holds the LogicTile: + * + * 0x217 HBI0217 Virtex-6 + * 0x192 HBI0192 Virtex-5 + * 0x247 HBI0247 Virtex-7 + * + * Return: HBI value of the logic tile daughterboard, zero if not accessible + */ +static u32 kbase_get_platform_logic_tile_type(void) +{ + void __iomem *syscfg_reg = NULL; + u32 sys_procid1 = 0; + + syscfg_reg = ioremap(VE_MOTHERBOARD_PERIPHERALS_SMB_CS7 + VE_SYS_PROC_ID1_OFFSET, 4); + if (NULL != syscfg_reg) { + sys_procid1 = readl(syscfg_reg); + iounmap(syscfg_reg); + } + + return sys_procid1 & VE_LOGIC_TILE_HBI_MASK; +} + +u32 kbase_get_platform_min_freq(void) +{ + u32 ve_logic_tile = kbase_get_platform_logic_tile_type(); + + switch (ve_logic_tile) { + case 0x217: + /* Virtex 6, HBI0217 */ + return VE_VIRTEX6_GPU_FREQ_MIN; + case 0x247: + /* Virtex 7, HBI0247 */ + return VE_VIRTEX7_GPU_FREQ_MIN; + default: + /* all other logic tiles, i.e., Virtex 5 HBI0192 + * or unsuccessful reading from the platform - + * fall back to some default value */ + return VE_DEFAULT_GPU_FREQ_MIN; + } +} + +u32 kbase_get_platform_max_freq(void) +{ + u32 ve_logic_tile = kbase_get_platform_logic_tile_type(); + + switch (ve_logic_tile) { + case 0x217: + /* Virtex 6, HBI0217 */ + return VE_VIRTEX6_GPU_FREQ_MAX; + case 0x247: + /* Virtex 7, HBI0247 */ + return VE_VIRTEX7_GPU_FREQ_MAX; + default: + /* all other logic tiles, i.e., Virtex 5 HBI0192 + * or unsuccessful reading from the platform - + * fall back to some default value */ + return VE_DEFAULT_GPU_FREQ_MAX; + } +} diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h new file mode 100644 index 00000000000000..da865698133a34 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.h @@ -0,0 +1,38 @@ +/* + * + * (C) COPYRIGHT 2012-2013, 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#ifndef _KBASE_CPU_VEXPRESS_H_ +#define _KBASE_CPU_VEXPRESS_H_ + +/** + * Versatile Express implementation of @ref kbase_cpu_clk_speed_func. + */ +int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock); + +/** + * Get the minimum GPU frequency for the attached logic tile + */ +u32 kbase_get_platform_min_freq(void); + +/** + * Get the maximum GPU frequency for the attached logic tile + */ +u32 kbase_get_platform_max_freq(void); + +#endif /* _KBASE_CPU_VEXPRESS_H_ */ diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild new file mode 100644 index 00000000000000..df87c74f43baf3 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild @@ -0,0 +1,18 @@ +# +# (C) COPYRIGHT 2013-2014, 2016-2017 ARM Limited. All rights reserved. 
+# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +mali_kbase-y += \ + $(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \ + mali_kbase_platform_fake.o diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h new file mode 100644 index 00000000000000..0efbf3962f984c --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h @@ -0,0 +1,73 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/** + * Maximum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MAX 5000 +/** + * Minimum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MIN 5000 + +/** + * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock + * + * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_cpu_clk_speed_func. + * Default Value: NA + */ +#define CPU_SPEED_FUNC (&kbase_cpuprops_get_default_clock_speed) + +/** + * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock + * + * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_gpu_clk_speed_func. + * Default Value: NA + */ +#define GPU_SPEED_FUNC (NULL) + +/** + * Power management configuration + * + * Attached value: pointer to @ref kbase_pm_callback_conf + * Default value: See @ref kbase_pm_callback_conf + */ +#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) + +/** + * Platform specific configuration functions + * + * Attached value: pointer to @ref kbase_platform_funcs_conf + * Default value: See @ref kbase_platform_funcs_conf + */ +#define PLATFORM_FUNCS (NULL) + +extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c new file mode 100644 index 00000000000000..5a69758346ff06 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c @@ -0,0 +1,66 @@ +/* + * + * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include +#include + +#ifndef CONFIG_OF +static struct kbase_io_resources io_resources = { + .job_irq_number = 68, + .mmu_irq_number = 69, + .gpu_irq_number = 70, + .io_memory_region = { + .start = 0x2f010000, + .end = 0x2f010000 + (4096 * 4) - 1} +}; +#endif + +static int pm_callback_power_on(struct kbase_device *kbdev) +{ + /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ + return 1; +} + +static void pm_callback_power_off(struct kbase_device *kbdev) +{ +} + +struct kbase_pm_callback_conf pm_callbacks = { + .power_on_callback = pm_callback_power_on, + .power_off_callback = pm_callback_power_off, + .power_suspend_callback = NULL, + .power_resume_callback = NULL +}; + +static struct kbase_platform_config versatile_platform_config = { +#ifndef CONFIG_OF + .io_resources = &io_resources +#endif +}; + +struct kbase_platform_config *kbase_get_platform_config(void) +{ + return &versatile_platform_config; +} + +int kbase_platform_early_init(void) +{ + /* Nothing needed at this stage */ + return 0; +} diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild new file mode 100644 index 00000000000000..d9d5e90852317c --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild @@ -0,0 +1,19 @@ +# +# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +mali_kbase-y += \ + $(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \ + $(MALI_PLATFORM_DIR)/mali_kbase_cpu_vexpress.o \ + mali_kbase_platform_fake.o diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h new file mode 100644 index 00000000000000..dbdf21e009f994 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h @@ -0,0 +1,75 @@ +/* + * + * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include "mali_kbase_cpu_vexpress.h" + +/** + * Maximum frequency GPU will be clocked at. Given in kHz. 
+ * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MAX 10000 +/** + * Minimum frequency GPU will be clocked at. Given in kHz. + * This must be specified as there is no default value. + * + * Attached value: number in kHz + * Default value: NA + */ +#define GPU_FREQ_KHZ_MIN 10000 + +/** + * CPU_SPEED_FUNC - A pointer to a function that calculates the CPU clock + * + * CPU clock speed of the platform is in MHz - see kbase_cpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_cpu_clk_speed_func. + * Default Value: NA + */ +#define CPU_SPEED_FUNC (&kbase_get_vexpress_cpu_clock_speed) + +/** + * GPU_SPEED_FUNC - A pointer to a function that calculates the GPU clock + * + * GPU clock speed of the platform in MHz - see kbase_gpu_clk_speed_func + * for the function prototype. + * + * Attached value: A kbase_gpu_clk_speed_func. + * Default Value: NA + */ +#define GPU_SPEED_FUNC (NULL) + +/** + * Power management configuration + * + * Attached value: pointer to @ref kbase_pm_callback_conf + * Default value: See @ref kbase_pm_callback_conf + */ +#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks) + +/** + * Platform specific configuration functions + * + * Attached value: pointer to @ref kbase_platform_funcs_conf + * Default value: See @ref kbase_platform_funcs_conf + */ +#define PLATFORM_FUNCS (NULL) + +extern struct kbase_pm_callback_conf pm_callbacks; diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c new file mode 100644 index 00000000000000..5d8ec2d4a115bb --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c @@ -0,0 +1,70 @@ +/* + * + * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + + + +#include +#include +#include +#include +#include "mali_kbase_cpu_vexpress.h" + +#ifndef CONFIG_OF +static struct kbase_io_resources io_resources = { + .job_irq_number = 75, + .mmu_irq_number = 76, + .gpu_irq_number = 77, + .io_memory_region = { + .start = 0x2F000000, + .end = 0x2F000000 + (4096 * 4) - 1} +}; +#endif + +static int pm_callback_power_on(struct kbase_device *kbdev) +{ + /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ + return 1; +} + +static void pm_callback_power_off(struct kbase_device *kbdev) +{ +} + +struct kbase_pm_callback_conf pm_callbacks = { + .power_on_callback = pm_callback_power_on, + .power_off_callback = pm_callback_power_off, + .power_suspend_callback = NULL, + .power_resume_callback = NULL +}; + +static struct kbase_platform_config versatile_platform_config = { +#ifndef CONFIG_OF + .io_resources = &io_resources +#endif +}; + +struct kbase_platform_config *kbase_get_platform_config(void) +{ + return &versatile_platform_config; +} + +int kbase_platform_early_init(void) +{ + /* Nothing needed at this stage */ + return 0; +} + diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c new file mode 100644 index 00000000000000..816dff49835f30 --- /dev/null +++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c @@ -0,0 +1,71 @@ +/* + * + * (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + + + +#include +#include +#include "mali_kbase_cpu_vexpress.h" + +#define HZ_IN_MHZ (1000000) + +#define CORETILE_EXPRESS_A9X4_SCC_START (0x100E2000) +#define MOTHERBOARD_SYS_CFG_START (0x10000000) +#define SYS_CFGDATA_OFFSET (0x000000A0) +#define SYS_CFGCTRL_OFFSET (0x000000A4) +#define SYS_CFGSTAT_OFFSET (0x000000A8) + +#define SYS_CFGCTRL_START_BIT_VALUE (1 << 31) +#define READ_REG_BIT_VALUE (0 << 30) +#define DCC_DEFAULT_BIT_VALUE (0 << 26) +#define SYS_CFG_OSC_FUNC_BIT_VALUE (1 << 20) +#define SITE_DEFAULT_BIT_VALUE (1 << 16) +#define BOARD_STACK_POS_DEFAULT_BIT_VALUE (0 << 12) +#define DEVICE_DEFAULT_BIT_VALUE (2 << 0) +#define SYS_CFG_COMPLETE_BIT_VALUE (1 << 0) +#define SYS_CFG_ERROR_BIT_VALUE (1 << 1) + +#define FEED_REG_BIT_MASK (0x0F) +#define FCLK_PA_DIVIDE_BIT_SHIFT (0x03) +#define FCLK_PB_DIVIDE_BIT_SHIFT (0x07) +#define FCLK_PC_DIVIDE_BIT_SHIFT (0x0B) +#define AXICLK_PA_DIVIDE_BIT_SHIFT (0x0F) +#define AXICLK_PB_DIVIDE_BIT_SHIFT (0x13) + +#define IS_SINGLE_BIT_SET(val, pos) (val&(1< + +/** + * @addtogroup uk_api User-Kernel Interface API + * @{ + */ + +/** + * @addtogroup uk_api_kernel UKK (Kernel side) + * @{ + */ + +/** + * Internal OS specific data structure associated with each UKK session. Part + * of a ukk_session object. 
+ */ +typedef struct ukkp_session { + int dummy; /**< No internal OS specific data at this time */ +} ukkp_session; + +/** @} end group uk_api_kernel */ + +/** @} end group uk_api */ + +#endif /* _UKK_OS_H__ */ diff --git a/drivers/gpu/arm/midgard/protected_mode_switcher.h b/drivers/gpu/arm/midgard/protected_mode_switcher.h new file mode 100644 index 00000000000000..5dc2f3ba8cf625 --- /dev/null +++ b/drivers/gpu/arm/midgard/protected_mode_switcher.h @@ -0,0 +1,64 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _PROTECTED_MODE_SWITCH_H_ +#define _PROTECTED_MODE_SWITCH_H_ + +struct protected_mode_device; + +/** + * struct protected_mode_ops - Callbacks for protected mode switch operations + * + * @protected_mode_enable: Callback to enable protected mode for device + * @protected_mode_disable: Callback to disable protected mode for device + */ +struct protected_mode_ops { + /** + * protected_mode_enable() - Enable protected mode on device + * @dev: The struct device + * + * Return: 0 on success, non-zero on error + */ + int (*protected_mode_enable)( + struct protected_mode_device *protected_dev); + + /** + * protected_mode_disable() - Disable protected mode on device, and + * reset device + * @dev: The struct device + * + * Return: 0 on success, non-zero on error + */ + int (*protected_mode_disable)( + struct protected_mode_device *protected_dev); +}; + +/** + * struct protected_mode_device - Device structure for protected mode devices + * + * @ops - Callbacks associated with this device + * @data - Pointer to device private data + * + * This structure should be registered with the platform device using + * platform_set_drvdata(). + */ +struct protected_mode_device { + struct protected_mode_ops ops; + void *data; +}; + +#endif /* _PROTECTED_MODE_SWITCH_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/Kbuild b/drivers/gpu/arm/midgard/tests/Kbuild new file mode 100644 index 00000000000000..b4bed047343910 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/Kbuild @@ -0,0 +1,17 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +obj-$(CONFIG_MALI_KUTF) += kutf/ +obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test/ diff --git a/drivers/gpu/arm/midgard/tests/Kconfig b/drivers/gpu/arm/midgard/tests/Kconfig new file mode 100644 index 00000000000000..da0515c065de90 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/Kconfig @@ -0,0 +1,17 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. 
+# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +source "drivers/gpu/arm/midgard/tests/kutf/Kconfig" +source "drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig" diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h new file mode 100644 index 00000000000000..3667687a004686 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h @@ -0,0 +1,72 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_HELPERS_H_ +#define _KERNEL_UTF_HELPERS_H_ + +/* kutf_helpers.h + * Test helper functions for the kernel UTF test infrastructure. + * + * These functions provide methods for enqueuing/dequeuing lines of text sent + * by user space. They are used to implement the transfer of "userdata" from + * user space to kernel. + */ + +#include + +/** + * kutf_helper_input_dequeue() - Dequeue a line sent by user space + * @context: KUTF context + * @str_size: Pointer to an integer to receive the size of the string + * + * If no line is available then this function will wait (interruptibly) until + * a line is available. + * + * Return: The line dequeued, ERR_PTR(-EINTR) if interrupted or NULL on end + * of data. + */ +char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size); + +/** + * kutf_helper_input_enqueue() - Enqueue a line sent by user space + * @context: KUTF context + * @str: The user space address of the line + * @size: The length in bytes of the string + * + * This function will use copy_from_user to copy the string out of user space. + * The string need not be NULL-terminated (@size should not include the NULL + * termination). + * + * As a special case @str==NULL and @size==0 is valid to mark the end of input, + * but callers should use kutf_helper_input_enqueue_end_of_data() instead. + * + * Return: 0 on success, -EFAULT if the line cannot be copied from user space, + * -ENOMEM if out of memory. + */ +int kutf_helper_input_enqueue(struct kutf_context *context, + const char __user *str, size_t size); + +/** + * kutf_helper_input_enqueue_end_of_data() - Signal no more data is to be sent + * @context: KUTF context + * + * After this function has been called, kutf_helper_input_dequeue() will always + * return NULL. 
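+ *
+ * Once end of data has been signalled, a consumer loop such as the following
+ * (an illustrative sketch only, not part of this interface) terminates
+ * cleanly instead of blocking:
+ *
+ *   char *line;
+ *   size_t sz;
+ *
+ *   while ((line = kutf_helper_input_dequeue(context, &sz)) != NULL) {
+ *           if (IS_ERR(line))
+ *                   break;  /* interrupted while waiting */
+ *           /* consume sz bytes starting at line */
+ *   }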
+ */ +void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context); + +#endif /* _KERNEL_UTF_HELPERS_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h new file mode 100644 index 00000000000000..c7b5263033cfde --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h @@ -0,0 +1,174 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_HELPERS_USER_H_ +#define _KERNEL_UTF_HELPERS_USER_H_ + +/* kutf_helpers.h + * Test helper functions for the kernel UTF test infrastructure, whose + * implementation mirrors that of similar functions for kutf-userside + */ + +#include +#include + + +#define KUTF_HELPER_MAX_VAL_NAME_LEN 255 + +enum kutf_helper_valtype { + KUTF_HELPER_VALTYPE_INVALID, + KUTF_HELPER_VALTYPE_U64, + KUTF_HELPER_VALTYPE_STR, + + KUTF_HELPER_VALTYPE_COUNT /* Must be last */ +}; + +struct kutf_helper_named_val { + enum kutf_helper_valtype type; + char *val_name; + union { + u64 val_u64; + char *val_str; + } u; +}; + +/* Extra error values for certain helpers when we want to distinguish between + * Linux's own error values too. + * + * These can only be used on certain functions returning an int type that are + * documented as returning one of these potential values, they cannot be used + * from functions return a ptr type, since we can't decode it with PTR_ERR + * + * No negative values are used - Linux error codes should be used instead, and + * indicate a problem in accessing the data file itself (are generally + * unrecoverable) + * + * Positive values indicate correct access but invalid parsing (can be + * recovered from assuming data in the future is correct) */ +enum kutf_helper_err { + /* No error - must be zero */ + KUTF_HELPER_ERR_NONE = 0, + /* Named value parsing encountered an invalid name */ + KUTF_HELPER_ERR_INVALID_NAME, + /* Named value parsing of string or u64 type encountered extra + * characters after the value (after the last digit for a u64 type or + * after the string end delimiter for string type) */ + KUTF_HELPER_ERR_CHARS_AFTER_VAL, + /* Named value parsing of string type couldn't find the string end + * delimiter. 
+ * + * This cannot be encountered when the NAME="value" message exceeds the + * textbuf's maximum line length, because such messages are not checked + * for an end string delimiter */ + KUTF_HELPER_ERR_NO_END_DELIMITER, + /* Named value didn't parse as any of the known types */ + KUTF_HELPER_ERR_INVALID_VALUE, +}; + + +/* Send named NAME=value pair, u64 value + * + * NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long + * + * Any failure will be logged on the suite's current test fixture + * + * Returns 0 on success, non-zero on failure + */ +int kutf_helper_send_named_u64(struct kutf_context *context, + const char *val_name, u64 val); + +/* Get the maximum length of a string that can be represented as a particular + * NAME="value" pair without string-value truncation in the kernel's buffer + * + * Given val_name and the kernel buffer's size, this can be used to determine + * the maximum length of a string that can be sent as val_name="value" pair + * without having the string value truncated. Any string longer than this will + * be truncated at some point during communication to this size. + * + * It is assumed that val_name is a valid name for + * kutf_helper_send_named_str(), and no checking will be made to + * ensure this. + * + * Returns the maximum string length that can be represented, or a negative + * value if the NAME="value" encoding itself wouldn't fit in kern_buf_sz + */ +int kutf_helper_max_str_len_for_kern(const char *val_name, int kern_buf_sz); + +/* Send named NAME="str" pair + * + * no escaping allowed in str. Any of the following characters will terminate + * the string: '"' '\\' '\n' + * + * NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long + * + * Any failure will be logged on the suite's current test fixture + * + * Returns 0 on success, non-zero on failure */ +int kutf_helper_send_named_str(struct kutf_context *context, + const char *val_name, const char *val_str); + +/* Receive named NAME=value pair + * + * This can receive u64 and string values - check named_val->type + * + * If you are not planning on dynamic handling of the named value's name and + * type, then kutf_helper_receive_check_val() is more useful as a + * convenience function. + * + * String members of named_val will come from memory allocated on the fixture's mempool + * + * Returns 0 on success. Negative value on failure to receive from the 'run' + * file, positive value indicates an enum kutf_helper_err value for correct + * reception of data but invalid parsing */ +int kutf_helper_receive_named_val( + struct kutf_context *context, + struct kutf_helper_named_val *named_val); + +/* Receive and validate NAME=value pair + * + * As with kutf_helper_receive_named_val, but validate that the + * name and type are as expected, as a convenience for a common pattern found + * in tests. + * + * NOTE: this only returns an error value if there was actually a problem + * receiving data. 
+ * + * NOTE: If the underlying data was received correctly, but: + * - isn't of the expected name + * - isn't the expected type + * - isn't correctly parsed for the type + * then the following happens: + * - failure result is recorded + * - named_val->type will be KUTF_HELPER_VALTYPE_INVALID + * - named_val->u will contain some default value that should be relatively + * harmless for the test, including being writable in the case of string + * values + * - return value will be 0 to indicate success + * + * The rationale behind this is that we'd prefer to continue the rest of the + * test with failures propagated, rather than hitting a timeout */ +int kutf_helper_receive_check_val( + struct kutf_helper_named_val *named_val, + struct kutf_context *context, + const char *expect_val_name, + enum kutf_helper_valtype expect_val_type); + +/* Output a named value to kmsg */ +void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val); + + +#endif /* _KERNEL_UTF_HELPERS_USER_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h new file mode 100644 index 00000000000000..584c9dd4bc138f --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h @@ -0,0 +1,68 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_MEM_H_ +#define _KERNEL_UTF_MEM_H_ + +/* kutf_mem.h + * Functions for management of memory pools in the kernel. + * + * This module implements a memory pool allocator, allowing a test + * implementation to allocate linked allocations which can then be freed by a + * single free which releases all of the resources held by the entire pool. + * + * Note that it is not possible to free single resources within the pool once + * allocated. + */ + +#include +#include + +/** + * struct kutf_mempool - the memory pool context management structure + * @head: list head on which the allocations in this context are added to + * @lock: mutex for concurrent allocation from multiple threads + * + */ +struct kutf_mempool { + struct list_head head; + struct mutex lock; +}; + +/** + * kutf_mempool_init() - Initialize a memory pool. + * @pool: Memory pool structure to initialize, provided by the user + * + * Return: zero on success + */ +int kutf_mempool_init(struct kutf_mempool *pool); + +/** + * kutf_mempool_alloc() - Allocate memory from a pool + * @pool: Memory pool to allocate from + * @size: Size of memory wanted in number of bytes + * + * Return: Pointer to memory on success, NULL on failure. + */ +void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size); + +/** + * kutf_mempool_destroy() - Destroy a memory pool, freeing all memory within it. 
+ * @pool: The memory pool to free + */ +void kutf_mempool_destroy(struct kutf_mempool *pool); +#endif /* _KERNEL_UTF_MEM_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h new file mode 100644 index 00000000000000..6787f7188f861b --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h @@ -0,0 +1,176 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_RESULTSET_H_ +#define _KERNEL_UTF_RESULTSET_H_ + +/* kutf_resultset.h + * Functions and structures for handling test results and result sets. + * + * This section of the kernel UTF contains structures and functions used for the + * management of Results and Result Sets. + */ + +/** + * enum kutf_result_status - Status values for a single Test error. + * @KUTF_RESULT_BENCHMARK: Result is a meta-result containing benchmark + * results. + * @KUTF_RESULT_SKIP: The test was skipped. + * @KUTF_RESULT_UNKNOWN: The test has an unknown result. + * @KUTF_RESULT_PASS: The test result passed. + * @KUTF_RESULT_DEBUG: The test result passed, but raised a debug + * message. + * @KUTF_RESULT_INFO: The test result passed, but raised + * an informative message. + * @KUTF_RESULT_WARN: The test result passed, but raised a warning + * message. + * @KUTF_RESULT_FAIL: The test result failed with a non-fatal error. + * @KUTF_RESULT_FATAL: The test result failed with a fatal error. + * @KUTF_RESULT_ABORT: The test result failed due to a non-UTF + * assertion failure. + * @KUTF_RESULT_USERDATA: User data is ready to be read, + * this is not seen outside the kernel + * @KUTF_RESULT_USERDATA_WAIT: Waiting for user data to be sent, + * this is not seen outside the kernel + * @KUTF_RESULT_TEST_FINISHED: The test has finished, no more results will + * be produced. This is not seen outside kutf + */ +enum kutf_result_status { + KUTF_RESULT_BENCHMARK = -3, + KUTF_RESULT_SKIP = -2, + KUTF_RESULT_UNKNOWN = -1, + + KUTF_RESULT_PASS = 0, + KUTF_RESULT_DEBUG = 1, + KUTF_RESULT_INFO = 2, + KUTF_RESULT_WARN = 3, + KUTF_RESULT_FAIL = 4, + KUTF_RESULT_FATAL = 5, + KUTF_RESULT_ABORT = 6, + + KUTF_RESULT_USERDATA = 7, + KUTF_RESULT_USERDATA_WAIT = 8, + KUTF_RESULT_TEST_FINISHED = 9 +}; + +/* The maximum size of a kutf_result_status result when + * converted to a string + */ +#define KUTF_ERROR_MAX_NAME_SIZE 21 + +#ifdef __KERNEL__ + +#include +#include + +struct kutf_context; + +/** + * struct kutf_result - Represents a single test result. + * @node: Next result in the list of results. + * @status: The status summary (pass / warn / fail / etc). + * @message: A more verbose status message. + */ +struct kutf_result { + struct list_head node; + enum kutf_result_status status; + const char *message; +}; + +/** + * KUTF_RESULT_SET_WAITING_FOR_INPUT - Test is waiting for user data + * + * This flag is set within a struct kutf_result_set whenever the test is blocked + * waiting for user data. 
Attempts to dequeue results when this flag is set + * will cause a dummy %KUTF_RESULT_USERDATA_WAIT result to be produced. This + * is used to output a warning message and end of file. + */ +#define KUTF_RESULT_SET_WAITING_FOR_INPUT 1 + +/** + * struct kutf_result_set - Represents a set of results. + * @results: List head of a struct kutf_result list for storing the results + * @waitq: Wait queue signalled whenever new results are added. + * @flags: Flags see %KUTF_RESULT_SET_WAITING_FOR_INPUT + */ +struct kutf_result_set { + struct list_head results; + wait_queue_head_t waitq; + int flags; +}; + +/** + * kutf_create_result_set() - Create a new result set + * to which results can be added. + * + * Return: The created result set. + */ +struct kutf_result_set *kutf_create_result_set(void); + +/** + * kutf_add_result() - Add a result to the end of an existing result set. + * + * @context: The kutf context + * @status: The result status to add. + * @message: The result message to add. + * + * Return: 0 if the result is successfully added. -ENOMEM if allocation fails. + */ +int kutf_add_result(struct kutf_context *context, + enum kutf_result_status status, const char *message); + +/** + * kutf_remove_result() - Remove a result from the head of a result set. + * @set: The result set. + * + * This function will block until there is a result to read. The wait is + * interruptible, so this function will return with an ERR_PTR if interrupted. + * + * Return: result or ERR_PTR if interrupted + */ +struct kutf_result *kutf_remove_result( + struct kutf_result_set *set); + +/** + * kutf_destroy_result_set() - Free a previously created result set. + * + * @results: The result set whose resources to free. + */ +void kutf_destroy_result_set(struct kutf_result_set *results); + +/** + * kutf_set_waiting_for_input() - The test is waiting for userdata + * + * @set: The result set to update + * + * Causes the result set to always have results and return a fake + * %KUTF_RESULT_USERDATA_WAIT result. + */ +void kutf_set_waiting_for_input(struct kutf_result_set *set); + +/** + * kutf_clear_waiting_for_input() - The test is no longer waiting for userdata + * + * @set: The result set to update + * + * Cancels the effect of kutf_set_waiting_for_input() + */ +void kutf_clear_waiting_for_input(struct kutf_result_set *set); + +#endif /* __KERNEL__ */ + +#endif /* _KERNEL_UTF_RESULTSET_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h new file mode 100644 index 00000000000000..ca30e575bc3a25 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h @@ -0,0 +1,564 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_SUITE_H_ +#define _KERNEL_UTF_SUITE_H_ + +/* kutf_suite.h + * Functions for management of test suites. + * + * This collection of data structures, macros, and functions are used to + * create Test Suites, Tests within those Test Suites, and Fixture variants + * of each test. 
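+ *
+ * A typical registration flow, sketched here purely for illustration (error
+ * handling omitted; everything other than the API declared in this header is
+ * hypothetical):
+ *
+ *   struct kutf_application *app = kutf_create_application("example_app");
+ *   struct kutf_suite *suite = kutf_create_suite(app, "example_suite", 1,
+ *                   example_create_fixture, example_remove_fixture);
+ *
+ *   kutf_add_test(suite, 0x0, "example_test", example_test_func);
+ *   /* ... and on module exit ... */
+ *   kutf_destroy_application(app);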
+ */ + +#include +#include +#include + +#include +#include + +/* Arbitrary maximum size to prevent user space allocating too much kernel + * memory + */ +#define KUTF_MAX_LINE_LENGTH (1024u) + +/** + * Pseudo-flag indicating an absence of any specified test class. Note that + * tests should not be annotated with this constant as it is simply a zero + * value; tests without a more specific class must be marked with the flag + * KUTF_F_TEST_GENERIC. + */ +#define KUTF_F_TEST_NONE ((unsigned int)(0)) + +/** + * Class indicating this test is a smoke test. + * A given set of smoke tests should be quick to run, enabling rapid turn-around + * of "regress-on-commit" test runs. + */ +#define KUTF_F_TEST_SMOKETEST ((unsigned int)(1 << 1)) + +/** + * Class indicating this test is a performance test. + * These tests typically produce a performance metric, such as "time to run" or + * "frames per second", + */ +#define KUTF_F_TEST_PERFORMANCE ((unsigned int)(1 << 2)) + +/** + * Class indicating that this test is a deprecated test. + * These tests have typically been replaced by an alternative test which is + * more efficient, or has better coverage. + */ +#define KUTF_F_TEST_DEPRECATED ((unsigned int)(1 << 3)) + +/** + * Class indicating that this test is a known failure. + * These tests have typically been run and failed, but marking them as a known + * failure means it is easier to triage results. + * + * It is typically more convenient to triage known failures using the + * results database and web UI, as this means there is no need to modify the + * test code. + */ +#define KUTF_F_TEST_EXPECTED_FAILURE ((unsigned int)(1 << 4)) + +/** + * Class indicating that this test is a generic test, which is not a member of + * a more specific test class. Tests which are not created with a specific set + * of filter flags by the user are assigned this test class by default. + */ +#define KUTF_F_TEST_GENERIC ((unsigned int)(1 << 5)) + +/** + * Class indicating this test is a resource allocation failure test. + * A resource allocation failure test will test that an error code is + * correctly propagated when an allocation fails. + */ +#define KUTF_F_TEST_RESFAIL ((unsigned int)(1 << 6)) + +/** + * Additional flag indicating that this test is an expected failure when + * run in resource failure mode. These tests are never run when running + * the low resource mode. + */ +#define KUTF_F_TEST_EXPECTED_FAILURE_RF ((unsigned int)(1 << 7)) + +/** + * Flag reserved for user-defined filter zero. + */ +#define KUTF_F_TEST_USER_0 ((unsigned int)(1 << 24)) + +/** + * Flag reserved for user-defined filter one. + */ +#define KUTF_F_TEST_USER_1 ((unsigned int)(1 << 25)) + +/** + * Flag reserved for user-defined filter two. + */ +#define KUTF_F_TEST_USER_2 ((unsigned int)(1 << 26)) + +/** + * Flag reserved for user-defined filter three. + */ +#define KUTF_F_TEST_USER_3 ((unsigned int)(1 << 27)) + +/** + * Flag reserved for user-defined filter four. + */ +#define KUTF_F_TEST_USER_4 ((unsigned int)(1 << 28)) + +/** + * Flag reserved for user-defined filter five. + */ +#define KUTF_F_TEST_USER_5 ((unsigned int)(1 << 29)) + +/** + * Flag reserved for user-defined filter six. + */ +#define KUTF_F_TEST_USER_6 ((unsigned int)(1 << 30)) + +/** + * Flag reserved for user-defined filter seven. + */ +#define KUTF_F_TEST_USER_7 ((unsigned int)(1 << 31)) + +/** + * Pseudo-flag indicating that all test classes should be executed. 
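+ *
+ * The class flags above are bit masks, so a filter covering several classes
+ * can be built with bitwise OR, for example (illustrative only)
+ * (KUTF_F_TEST_SMOKETEST | KUTF_F_TEST_PERFORMANCE); this pseudo-flag is
+ * simply every bit set.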
+ */ +#define KUTF_F_TEST_ALL ((unsigned int)(0xFFFFFFFFU)) + +/** + * union kutf_callback_data - Union used to store test callback data + * @ptr_value: pointer to the location where test callback data + * are stored + * @u32_value: a number which represents test callback data + */ +union kutf_callback_data { + void *ptr_value; + u32 u32_value; +}; + +/** + * struct kutf_userdata_line - A line of user data to be returned to the user + * @node: struct list_head to link this into a list + * @str: The line of user data to return to user space + * @size: The number of bytes within @str + */ +struct kutf_userdata_line { + struct list_head node; + char *str; + size_t size; +}; + +/** + * KUTF_USERDATA_WARNING_OUTPUT - Flag specifying that a warning has been output + * + * If user space reads the "run" file while the test is waiting for user data, + * then the framework will output a warning message and set this flag within + * struct kutf_userdata. A subsequent read will then simply return an end of + * file condition rather than outputting the warning again. The upshot of this + * is that simply running 'cat' on a test which requires user data will produce + * the warning followed by 'cat' exiting due to EOF - which is much more user + * friendly than blocking indefinitely waiting for user data. + */ +#define KUTF_USERDATA_WARNING_OUTPUT 1 + +/** + * struct kutf_userdata - Structure holding user data + * @flags: See %KUTF_USERDATA_WARNING_OUTPUT + * @input_head: List of struct kutf_userdata_line containing user data + * to be read by the kernel space test. + * @input_waitq: Wait queue signalled when there is new user data to be + * read by the kernel space test. + */ +struct kutf_userdata { + unsigned long flags; + struct list_head input_head; + wait_queue_head_t input_waitq; +}; + +/** + * struct kutf_context - Structure representing a kernel test context + * @kref: Refcount for number of users of this context + * @suite: Convenience pointer to the suite this context + * is running + * @test_fix: The fixture that is being run in this context + * @fixture_pool: The memory pool used for the duration of + * the fixture/text context. + * @fixture: The user provided fixture structure. + * @fixture_index: The index (id) of the current fixture. + * @fixture_name: The name of the current fixture (or NULL if unnamed). + * @test_data: Any user private data associated with this test + * @result_set: All the results logged by this test context + * @status: The status of the currently running fixture. + * @expected_status: The expected status on exist of the currently + * running fixture. + * @work: Work item to enqueue onto the work queue to run the test + * @userdata: Structure containing the user data for the test to read + */ +struct kutf_context { + struct kref kref; + struct kutf_suite *suite; + struct kutf_test_fixture *test_fix; + struct kutf_mempool fixture_pool; + void *fixture; + unsigned int fixture_index; + const char *fixture_name; + union kutf_callback_data test_data; + struct kutf_result_set *result_set; + enum kutf_result_status status; + enum kutf_result_status expected_status; + + struct work_struct work; + struct kutf_userdata userdata; +}; + +/** + * struct kutf_suite - Structure representing a kernel test suite + * @app: The application this suite belongs to. + * @name: The name of this suite. + * @suite_data: Any user private data associated with this + * suite. 
+ * @create_fixture: Function used to create a new fixture instance + * @remove_fixture: Function used to destroy a new fixture instance + * @fixture_variants: The number of variants (must be at least 1). + * @suite_default_flags: Suite global filter flags which are set on + * all tests. + * @node: List node for suite_list + * @dir: The debugfs directory for this suite + * @test_list: List head to store all the tests which are + * part of this suite + */ +struct kutf_suite { + struct kutf_application *app; + const char *name; + union kutf_callback_data suite_data; + void *(*create_fixture)(struct kutf_context *context); + void (*remove_fixture)(struct kutf_context *context); + unsigned int fixture_variants; + unsigned int suite_default_flags; + struct list_head node; + struct dentry *dir; + struct list_head test_list; +}; + +/* ============================================================================ + Application functions +============================================================================ */ + +/** + * kutf_create_application() - Create an in kernel test application. + * @name: The name of the test application. + * + * Return: pointer to the kutf_application on success or NULL + * on failure + */ +struct kutf_application *kutf_create_application(const char *name); + +/** + * kutf_destroy_application() - Destroy an in kernel test application. + * + * @app: The test application to destroy. + */ +void kutf_destroy_application(struct kutf_application *app); + +/* ============================================================================ + Suite functions +============================================================================ */ + +/** + * kutf_create_suite() - Create a kernel test suite. + * @app: The test application to create the suite in. + * @name: The name of the suite. + * @fixture_count: The number of fixtures to run over the test + * functions in this suite + * @create_fixture: Callback used to create a fixture. The returned value + * is stored in the fixture pointer in the context for + * use in the test functions. + * @remove_fixture: Callback used to remove a previously created fixture. + * + * Suite names must be unique. Should two suites with the same name be + * registered with the same application then this function will fail, if they + * are registered with different applications then the function will not detect + * this and the call will succeed. + * + * Return: pointer to the created kutf_suite on success or NULL + * on failure + */ +struct kutf_suite *kutf_create_suite( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context)); + +/** + * kutf_create_suite_with_filters() - Create a kernel test suite with user + * defined default filters. + * @app: The test application to create the suite in. + * @name: The name of the suite. + * @fixture_count: The number of fixtures to run over the test + * functions in this suite + * @create_fixture: Callback used to create a fixture. The returned value + * is stored in the fixture pointer in the context for + * use in the test functions. + * @remove_fixture: Callback used to remove a previously created fixture. + * @filters: Filters to apply to a test if it doesn't provide its own + * + * Suite names must be unique. 
Should two suites with the same name be + * registered with the same application then this function will fail, if they + * are registered with different applications then the function will not detect + * this and the call will succeed. + * + * Return: pointer to the created kutf_suite on success or NULL on failure + */ +struct kutf_suite *kutf_create_suite_with_filters( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context), + unsigned int filters); + +/** + * kutf_create_suite_with_filters_and_data() - Create a kernel test suite with + * user defined default filters. + * @app: The test application to create the suite in. + * @name: The name of the suite. + * @fixture_count: The number of fixtures to run over the test + * functions in this suite + * @create_fixture: Callback used to create a fixture. The returned value + * is stored in the fixture pointer in the context for + * use in the test functions. + * @remove_fixture: Callback used to remove a previously created fixture. + * @filters: Filters to apply to a test if it doesn't provide its own + * @suite_data: Suite specific callback data, provided during the + * running of the test in the kutf_context + * + * Return: pointer to the created kutf_suite on success or NULL + * on failure + */ +struct kutf_suite *kutf_create_suite_with_filters_and_data( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context), + unsigned int filters, + union kutf_callback_data suite_data); + +/** + * kutf_add_test() - Add a test to a kernel test suite. + * @suite: The suite to add the test to. + * @id: The ID of the test. + * @name: The name of the test. + * @execute: Callback to the test function to run. + * + * Note: As no filters are provided the test will use the suite filters instead + */ +void kutf_add_test(struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context)); + +/** + * kutf_add_test_with_filters() - Add a test to a kernel test suite with filters + * @suite: The suite to add the test to. + * @id: The ID of the test. + * @name: The name of the test. + * @execute: Callback to the test function to run. + * @filters: A set of filtering flags, assigning test categories. + */ +void kutf_add_test_with_filters(struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context), + unsigned int filters); + +/** + * kutf_add_test_with_filters_and_data() - Add a test to a kernel test suite + * with filters. + * @suite: The suite to add the test to. + * @id: The ID of the test. + * @name: The name of the test. + * @execute: Callback to the test function to run. + * @filters: A set of filtering flags, assigning test categories. 
+ * @test_data: Test specific callback data, provided during the + * running of the test in the kutf_context + */ +void kutf_add_test_with_filters_and_data( + struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context), + unsigned int filters, + union kutf_callback_data test_data); + + +/* ============================================================================ + Test functions +============================================================================ */ +/** + * kutf_test_log_result_external() - Log a result which has been created + * externally into a in a standard form + * recognized by the log parser. + * @context: The test context the test is running in + * @message: The message for this result + * @new_status: The result status of this log message + */ +void kutf_test_log_result_external( + struct kutf_context *context, + const char *message, + enum kutf_result_status new_status); + +/** + * kutf_test_expect_abort() - Tell the kernel that you expect the current + * fixture to produce an abort. + * @context: The test context this test is running in. + */ +void kutf_test_expect_abort(struct kutf_context *context); + +/** + * kutf_test_expect_fatal() - Tell the kernel that you expect the current + * fixture to produce a fatal error. + * @context: The test context this test is running in. + */ +void kutf_test_expect_fatal(struct kutf_context *context); + +/** + * kutf_test_expect_fail() - Tell the kernel that you expect the current + * fixture to fail. + * @context: The test context this test is running in. + */ +void kutf_test_expect_fail(struct kutf_context *context); + +/** + * kutf_test_expect_warn() - Tell the kernel that you expect the current + * fixture to produce a warning. + * @context: The test context this test is running in. + */ +void kutf_test_expect_warn(struct kutf_context *context); + +/** + * kutf_test_expect_pass() - Tell the kernel that you expect the current + * fixture to pass. + * @context: The test context this test is running in. + */ +void kutf_test_expect_pass(struct kutf_context *context); + +/** + * kutf_test_skip() - Tell the kernel that the test should be skipped. + * @context: The test context this test is running in. + */ +void kutf_test_skip(struct kutf_context *context); + +/** + * kutf_test_skip_msg() - Tell the kernel that this test has been skipped, + * supplying a reason string. + * @context: The test context this test is running in. + * @message: A message string containing the reason for the skip. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a prebaked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_skip_msg(struct kutf_context *context, const char *message); + +/** + * kutf_test_pass() - Tell the kernel that this test has passed. + * @context: The test context this test is running in. + * @message: A message string containing the reason for the pass. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. 
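+ *
+ * For example (an illustrative sketch only; the format string and
+ * num_values are hypothetical):
+ *
+ *   kutf_test_pass(context, kutf_dsprintf(&context->fixture_pool,
+ *                   "checked %d values", num_values));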
+ */ +void kutf_test_pass(struct kutf_context *context, char const *message); + +/** + * kutf_test_debug() - Send a debug message + * @context: The test context this test is running in. + * @message: A message string containing the debug information. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_debug(struct kutf_context *context, char const *message); + +/** + * kutf_test_info() - Send an information message + * @context: The test context this test is running in. + * @message: A message string containing the information message. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_info(struct kutf_context *context, char const *message); + +/** + * kutf_test_warn() - Send a warning message + * @context: The test context this test is running in. + * @message: A message string containing the warning message. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_warn(struct kutf_context *context, char const *message); + +/** + * kutf_test_fail() - Tell the kernel that a test has failed + * @context: The test context this test is running in. + * @message: A message string containing the failure message. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_fail(struct kutf_context *context, char const *message); + +/** + * kutf_test_fatal() - Tell the kernel that a test has triggered a fatal error + * @context: The test context this test is running in. + * @message: A message string containing the fatal error message. + * + * Note: The message must not be freed during the lifetime of the test run. + * This means it should either be a pre-baked string, or if a dynamic string + * is required it must be created with kutf_dsprintf which will store + * the resultant string in a buffer who's lifetime is the same as the test run. + */ +void kutf_test_fatal(struct kutf_context *context, char const *message); + +/** + * kutf_test_abort() - Tell the kernel that a test triggered an abort in the test + * + * @context: The test context this test is running in. + */ +void kutf_test_abort(struct kutf_context *context); + +#endif /* _KERNEL_UTF_SUITE_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h new file mode 100644 index 00000000000000..c458c1f73802b3 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h @@ -0,0 +1,55 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#ifndef _KERNEL_UTF_UTILS_H_ +#define _KERNEL_UTF_UTILS_H_ + +/* kutf_utils.h + * Utilities for the kernel UTF test infrastructure. + * + * This collection of library functions are provided for use by kernel UTF + * and users of kernel UTF which don't directly fit within the other + * code modules. + */ + +#include + +/** + * Maximum size of the message strings within kernel UTF, messages longer then + * this will be truncated. + */ +#define KUTF_MAX_DSPRINTF_LEN 1024 + +/** + * kutf_dsprintf() - dynamic sprintf + * @pool: memory pool to allocate from + * @fmt: The format string describing the string to document. + * @... The parameters to feed in to the format string. + * + * This function implements sprintf which dynamically allocates memory to store + * the string. The library will free the memory containing the string when the + * result set is cleared or destroyed. + * + * Note The returned string may be truncated to fit an internal temporary + * buffer, which is KUTF_MAX_DSPRINTF_LEN bytes in length. + * + * Return: Returns pointer to allocated string, or NULL on error. + */ +const char *kutf_dsprintf(struct kutf_mempool *pool, + const char *fmt, ...); + +#endif /* _KERNEL_UTF_UTILS_H_ */ diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kbuild b/drivers/gpu/arm/midgard/tests/kutf/Kbuild new file mode 100644 index 00000000000000..97f80057224f93 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/Kbuild @@ -0,0 +1,20 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +ccflags-y += -I$(src)/../include + +obj-$(CONFIG_MALI_KUTF) += kutf.o + +kutf-y := kutf_mem.o kutf_resultset.o kutf_suite.o kutf_utils.o kutf_helpers.o kutf_helpers_user.o diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kconfig b/drivers/gpu/arm/midgard/tests/kutf/Kconfig new file mode 100644 index 00000000000000..6a87bdbf746e76 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/Kconfig @@ -0,0 +1,22 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + + +config MALI_KUTF + tristate "Mali Kernel Unit Test Framework" + default m + help + Enables MALI testing framework. 
To compile it as a module, + choose M here - this will generate a single module called kutf. diff --git a/drivers/gpu/arm/midgard/tests/kutf/Makefile b/drivers/gpu/arm/midgard/tests/kutf/Makefile new file mode 100644 index 00000000000000..010c92ca39b9fd --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/Makefile @@ -0,0 +1,29 @@ +# +# (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +# linux build system bootstrap for out-of-tree module + +# default to building for the host +ARCH ?= $(shell uname -m) + +ifeq ($(KDIR),) +$(error Must specify KDIR to point to the kernel to target)) +endif + +all: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS=-I$(CURDIR)/../include modules + +clean: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c new file mode 100644 index 00000000000000..bf887a5dadac28 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c @@ -0,0 +1,124 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* Kernel UTF test helpers */ +#include + +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(kutf_input_lock); + +static bool pending_input(struct kutf_context *context) +{ + bool input_pending; + + spin_lock(&kutf_input_lock); + + input_pending = !list_empty(&context->userdata.input_head); + + spin_unlock(&kutf_input_lock); + + return input_pending; +} + +char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size) +{ + struct kutf_userdata_line *line; + + spin_lock(&kutf_input_lock); + + while (list_empty(&context->userdata.input_head)) { + int err; + + kutf_set_waiting_for_input(context->result_set); + + spin_unlock(&kutf_input_lock); + + err = wait_event_interruptible(context->userdata.input_waitq, + pending_input(context)); + + if (err) + return ERR_PTR(-EINTR); + + spin_lock(&kutf_input_lock); + } + + line = list_first_entry(&context->userdata.input_head, + struct kutf_userdata_line, node); + if (line->str) { + /* + * Unless it is the end-of-input marker, + * remove it from the list + */ + list_del(&line->node); + } + + spin_unlock(&kutf_input_lock); + + if (str_size) + *str_size = line->size; + return line->str; +} + +int kutf_helper_input_enqueue(struct kutf_context *context, + const char __user *str, size_t size) +{ + struct kutf_userdata_line *line; + + line = kutf_mempool_alloc(&context->fixture_pool, + sizeof(*line) + size + 1); + if (!line) + return -ENOMEM; + if (str) { + unsigned long bytes_not_copied; + + line->size = size; + line->str = (void *)(line + 1); + bytes_not_copied = copy_from_user(line->str, str, size); + if (bytes_not_copied != 0) + return -EFAULT; + /* Zero terminate the string */ + line->str[size] = '\0'; + } else { + /* This is used to mark the end of input */ + WARN_ON(size); + line->size = 0; + line->str = NULL; + } + + spin_lock(&kutf_input_lock); + + list_add_tail(&line->node, &context->userdata.input_head); + + kutf_clear_waiting_for_input(context->result_set); + + spin_unlock(&kutf_input_lock); + + wake_up(&context->userdata.input_waitq); + + return 0; +} + +void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context) +{ + kutf_helper_input_enqueue(context, NULL, 0); +} diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c new file mode 100644 index 00000000000000..9e8ab99572934e --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c @@ -0,0 +1,462 @@ +/* + * + * (C) COPYRIGHT 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + + + +/* Kernel UTF test helpers that mirror those for kutf-userside */ +#include +#include +#include + +#include +#include + +const char *valtype_names[] = { + "INVALID", + "U64", + "STR", +}; + +static const char *get_val_type_name(enum kutf_helper_valtype valtype) +{ + /* enums can be signed or unsigned (implementation dependant), so + * enforce it to prevent: + * a) "<0 comparison on unsigned type" warning - if we did both upper + * and lower bound check + * b) incorrect range checking if it was a signed type - if we did + * upper bound check only */ + unsigned int type_idx = (unsigned int)valtype; + + if (type_idx >= (unsigned int)KUTF_HELPER_VALTYPE_COUNT) + type_idx = (unsigned int)KUTF_HELPER_VALTYPE_INVALID; + + return valtype_names[type_idx]; +} + +/* Check up to str_len chars of val_str to see if it's a valid value name: + * + * - Has between 1 and KUTF_HELPER_MAX_VAL_NAME_LEN characters before the \0 terminator + * - And, each char is in the character set [A-Z0-9_] */ +static int validate_val_name(const char *val_str, int str_len) +{ + int i = 0; + + for (i = 0; str_len && i <= KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0'; ++i, --str_len) { + char val_chr = val_str[i]; + + if (val_chr >= 'A' && val_chr <= 'Z') + continue; + if (val_chr >= '0' && val_chr <= '9') + continue; + if (val_chr == '_') + continue; + + /* Character not in the set [A-Z0-9_] - report error */ + return 1; + } + + /* Names of 0 length are not valid */ + if (i == 0) + return 1; + /* Length greater than KUTF_HELPER_MAX_VAL_NAME_LEN not allowed */ + if (i > KUTF_HELPER_MAX_VAL_NAME_LEN || (i == KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0')) + return 1; + + return 0; +} + +/* Find the length of the valid part of the string when it will be in quotes + * e.g. "str" + * + * That is, before any '\\', '\n' or '"' characters. 
This is so we don't have + * to escape the string */ +static int find_quoted_string_valid_len(const char *str) +{ + char *ptr; + const char *check_chars = "\\\n\""; + + ptr = strpbrk(str, check_chars); + if (ptr) + return (int)(ptr-str); + + return (int)strlen(str); +} + +static int kutf_helper_userdata_enqueue(struct kutf_context *context, + const char *str) +{ + char *str_copy; + size_t len; + int err; + + len = strlen(str)+1; + + str_copy = kutf_mempool_alloc(&context->fixture_pool, len); + if (!str_copy) + return -ENOMEM; + + strcpy(str_copy, str); + + err = kutf_add_result(context, KUTF_RESULT_USERDATA, str_copy); + + return err; +} + +#define MAX_U64_HEX_LEN 16 +/* (Name size) + ("=0x" size) + (64-bit hex value size) + (terminator) */ +#define NAMED_U64_VAL_BUF_SZ (KUTF_HELPER_MAX_VAL_NAME_LEN + 3 + MAX_U64_HEX_LEN + 1) + +int kutf_helper_send_named_u64(struct kutf_context *context, + const char *val_name, u64 val) +{ + int ret = 1; + char msgbuf[NAMED_U64_VAL_BUF_SZ]; + const char *errmsg = NULL; + + if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send u64 value named '%s': Invalid value name", val_name); + goto out_err; + } + + ret = snprintf(msgbuf, NAMED_U64_VAL_BUF_SZ, "%s=0x%llx", val_name, val); + if (ret >= NAMED_U64_VAL_BUF_SZ || ret < 0) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send u64 value named '%s': snprintf() problem buffer size==%d ret=%d", + val_name, NAMED_U64_VAL_BUF_SZ, ret); + goto out_err; + } + + ret = kutf_helper_userdata_enqueue(context, msgbuf); + if (ret) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send u64 value named '%s': send returned %d", + val_name, ret); + goto out_err; + } + + return ret; +out_err: + kutf_test_fail(context, errmsg); + return ret; +} +EXPORT_SYMBOL(kutf_helper_send_named_u64); + +#define NAMED_VALUE_SEP "=" +#define NAMED_STR_START_DELIM NAMED_VALUE_SEP "\"" +#define NAMED_STR_END_DELIM "\"" + +int kutf_helper_max_str_len_for_kern(const char *val_name, + int kern_buf_sz) +{ + const int val_name_len = strlen(val_name); + const int start_delim_len = strlen(NAMED_STR_START_DELIM); + const int end_delim_len = strlen(NAMED_STR_END_DELIM); + int max_msg_len = kern_buf_sz; + int max_str_len; + + max_str_len = max_msg_len - val_name_len - start_delim_len - + end_delim_len; + + return max_str_len; +} +EXPORT_SYMBOL(kutf_helper_max_str_len_for_kern); + +int kutf_helper_send_named_str(struct kutf_context *context, + const char *val_name, + const char *val_str) +{ + int val_str_len; + int str_buf_sz; + char *str_buf = NULL; + int ret = 1; + char *copy_ptr; + int val_name_len; + int start_delim_len = strlen(NAMED_STR_START_DELIM); + int end_delim_len = strlen(NAMED_STR_END_DELIM); + const char *errmsg = NULL; + + if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send u64 value named '%s': Invalid value name", val_name); + goto out_err; + } + val_name_len = strlen(val_name); + + val_str_len = find_quoted_string_valid_len(val_str); + + /* (name length) + ("=\"" length) + (val_str len) + ("\"" length) + terminator */ + str_buf_sz = val_name_len + start_delim_len + val_str_len + end_delim_len + 1; + + /* Using kmalloc() here instead of mempool since we know we need to free + * before we return */ + str_buf = kmalloc(str_buf_sz, GFP_KERNEL); + if (!str_buf) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send str value named '%s': 
kmalloc failed, str_buf_sz=%d", + val_name, str_buf_sz); + goto out_err; + } + copy_ptr = str_buf; + + /* Manually copy each string component instead of snprintf because + * val_str may need to end early, and less error path handling */ + + /* name */ + memcpy(copy_ptr, val_name, val_name_len); + copy_ptr += val_name_len; + + /* str start delimiter */ + memcpy(copy_ptr, NAMED_STR_START_DELIM, start_delim_len); + copy_ptr += start_delim_len; + + /* str value */ + memcpy(copy_ptr, val_str, val_str_len); + copy_ptr += val_str_len; + + /* str end delimiter */ + memcpy(copy_ptr, NAMED_STR_END_DELIM, end_delim_len); + copy_ptr += end_delim_len; + + /* Terminator */ + *copy_ptr = '\0'; + + ret = kutf_helper_userdata_enqueue(context, str_buf); + + if (ret) { + errmsg = kutf_dsprintf(&context->fixture_pool, + "Failed to send str value named '%s': send returned %d", + val_name, ret); + goto out_err; + } + + kfree(str_buf); + return ret; + +out_err: + kutf_test_fail(context, errmsg); + kfree(str_buf); + return ret; +} +EXPORT_SYMBOL(kutf_helper_send_named_str); + +int kutf_helper_receive_named_val( + struct kutf_context *context, + struct kutf_helper_named_val *named_val) +{ + size_t recv_sz; + char *recv_str; + char *search_ptr; + char *name_str = NULL; + int name_len; + int strval_len; + enum kutf_helper_valtype type = KUTF_HELPER_VALTYPE_INVALID; + char *strval = NULL; + u64 u64val = 0; + int err = KUTF_HELPER_ERR_INVALID_VALUE; + + recv_str = kutf_helper_input_dequeue(context, &recv_sz); + if (!recv_str) + return -EBUSY; + else if (IS_ERR(recv_str)) + return PTR_ERR(recv_str); + + /* Find the '=', grab the name and validate it */ + search_ptr = strnchr(recv_str, recv_sz, NAMED_VALUE_SEP[0]); + if (search_ptr) { + name_len = search_ptr - recv_str; + if (!validate_val_name(recv_str, name_len)) { + /* no need to reallocate - just modify string in place */ + name_str = recv_str; + name_str[name_len] = '\0'; + + /* Move until after the '=' */ + recv_str += (name_len + 1); + recv_sz -= (name_len + 1); + } + } + if (!name_str) { + pr_err("Invalid name part for received string '%s'\n", + recv_str); + return KUTF_HELPER_ERR_INVALID_NAME; + } + + /* detect value type */ + if (*recv_str == NAMED_STR_START_DELIM[1]) { + /* string delimiter start*/ + ++recv_str; + --recv_sz; + + /* Find end of string */ + search_ptr = strnchr(recv_str, recv_sz, NAMED_STR_END_DELIM[0]); + if (search_ptr) { + strval_len = search_ptr - recv_str; + /* Validate the string to ensure it contains no quotes */ + if (strval_len == find_quoted_string_valid_len(recv_str)) { + /* no need to reallocate - just modify string in place */ + strval = recv_str; + strval[strval_len] = '\0'; + + /* Move until after the end delimiter */ + recv_str += (strval_len + 1); + recv_sz -= (strval_len + 1); + type = KUTF_HELPER_VALTYPE_STR; + } else { + pr_err("String value contains invalid characters in rest of received string '%s'\n", recv_str); + err = KUTF_HELPER_ERR_CHARS_AFTER_VAL; + } + } else { + pr_err("End of string delimiter not found in rest of received string '%s'\n", recv_str); + err = KUTF_HELPER_ERR_NO_END_DELIMITER; + } + } else { + /* possibly a number value - strtoull will parse it */ + err = kstrtoull(recv_str, 0, &u64val); + /* unlike userspace can't get an end ptr, but if kstrtoull() + * reads characters after the number it'll report -EINVAL */ + if (!err) { + int len_remain = strnlen(recv_str, recv_sz); + + type = KUTF_HELPER_VALTYPE_U64; + recv_str += len_remain; + recv_sz -= len_remain; + } else { + /* special case: not a number, 
report as such */ + pr_err("Rest of received string was not a numeric value or quoted string value: '%s'\n", recv_str); + } + } + + if (type == KUTF_HELPER_VALTYPE_INVALID) + return err; + + /* Any remaining characters - error */ + if (strnlen(recv_str, recv_sz) != 0) { + pr_err("Characters remain after value of type %s: '%s'\n", + get_val_type_name(type), recv_str); + return KUTF_HELPER_ERR_CHARS_AFTER_VAL; + } + + /* Success - write into the output structure */ + switch (type) { + case KUTF_HELPER_VALTYPE_U64: + named_val->u.val_u64 = u64val; + break; + case KUTF_HELPER_VALTYPE_STR: + named_val->u.val_str = strval; + break; + default: + pr_err("Unreachable, fix kutf_helper_receive_named_val\n"); + /* Coding error, report as though 'run' file failed */ + return -EINVAL; + } + + named_val->val_name = name_str; + named_val->type = type; + + return KUTF_HELPER_ERR_NONE; +} +EXPORT_SYMBOL(kutf_helper_receive_named_val); + +#define DUMMY_MSG "" +int kutf_helper_receive_check_val( + struct kutf_helper_named_val *named_val, + struct kutf_context *context, + const char *expect_val_name, + enum kutf_helper_valtype expect_val_type) +{ + int err; + + err = kutf_helper_receive_named_val(context, named_val); + if (err < 0) { + const char *msg = kutf_dsprintf(&context->fixture_pool, + "Failed to receive value named '%s'", + expect_val_name); + kutf_test_fail(context, msg); + return err; + } else if (err > 0) { + const char *msg = kutf_dsprintf(&context->fixture_pool, + "Named-value parse error when expecting value named '%s'", + expect_val_name); + kutf_test_fail(context, msg); + goto out_fail_and_fixup; + } + + if (strcmp(named_val->val_name, expect_val_name) != 0) { + const char *msg = kutf_dsprintf(&context->fixture_pool, + "Expecting to receive value named '%s' but got '%s'", + expect_val_name, named_val->val_name); + kutf_test_fail(context, msg); + goto out_fail_and_fixup; + } + + + if (named_val->type != expect_val_type) { + const char *msg = kutf_dsprintf(&context->fixture_pool, + "Expecting value named '%s' to be of type %s but got %s", + expect_val_name, get_val_type_name(expect_val_type), + get_val_type_name(named_val->type)); + kutf_test_fail(context, msg); + goto out_fail_and_fixup; + } + + return err; + +out_fail_and_fixup: + /* Produce a valid but incorrect value */ + switch (expect_val_type) { + case KUTF_HELPER_VALTYPE_U64: + named_val->u.val_u64 = 0ull; + break; + case KUTF_HELPER_VALTYPE_STR: + { + char *str = kutf_mempool_alloc(&context->fixture_pool, sizeof(DUMMY_MSG)); + + if (!str) + return -1; + + strcpy(str, DUMMY_MSG); + named_val->u.val_str = str; + break; + } + default: + break; + } + + /* Indicate that this is invalid */ + named_val->type = KUTF_HELPER_VALTYPE_INVALID; + + /* But at least allow the caller to continue in the test with failures */ + return 0; +} +EXPORT_SYMBOL(kutf_helper_receive_check_val); + +void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val) +{ + switch (named_val->type) { + case KUTF_HELPER_VALTYPE_U64: + pr_warn("%s=0x%llx\n", named_val->val_name, named_val->u.val_u64); + break; + case KUTF_HELPER_VALTYPE_STR: + pr_warn("%s=\"%s\"\n", named_val->val_name, named_val->u.val_str); + break; + case KUTF_HELPER_VALTYPE_INVALID: + pr_warn("%s is invalid\n", named_val->val_name); + break; + default: + pr_warn("%s has unknown type %d\n", named_val->val_name, named_val->type); + break; + } +} +EXPORT_SYMBOL(kutf_helper_output_named_val); diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c 
b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c new file mode 100644 index 00000000000000..a75e15fde05f72 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c @@ -0,0 +1,102 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* Kernel UTF memory management functions */ + +#include +#include + +#include + + +/** + * struct kutf_alloc_entry - Structure representing an allocation. + * @node: List node for use with kutf_mempool. + * @data: Data area of the allocation + */ +struct kutf_alloc_entry { + struct list_head node; + u8 data[0]; +}; + +int kutf_mempool_init(struct kutf_mempool *pool) +{ + if (!pool) { + pr_err("NULL pointer passed to %s\n", __func__); + return -1; + } + + INIT_LIST_HEAD(&pool->head); + mutex_init(&pool->lock); + + return 0; +} +EXPORT_SYMBOL(kutf_mempool_init); + +void kutf_mempool_destroy(struct kutf_mempool *pool) +{ + struct list_head *remove; + struct list_head *tmp; + + if (!pool) { + pr_err("NULL pointer passed to %s\n", __func__); + return; + } + + mutex_lock(&pool->lock); + list_for_each_safe(remove, tmp, &pool->head) { + struct kutf_alloc_entry *remove_alloc; + + remove_alloc = list_entry(remove, struct kutf_alloc_entry, node); + list_del(&remove_alloc->node); + kfree(remove_alloc); + } + mutex_unlock(&pool->lock); + +} +EXPORT_SYMBOL(kutf_mempool_destroy); + +void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size) +{ + struct kutf_alloc_entry *ret; + + if (!pool) { + pr_err("NULL pointer passed to %s\n", __func__); + goto fail_pool; + } + + mutex_lock(&pool->lock); + + ret = kmalloc(sizeof(*ret) + size, GFP_KERNEL); + if (!ret) { + pr_err("Failed to allocate memory\n"); + goto fail_alloc; + } + + INIT_LIST_HEAD(&ret->node); + list_add(&ret->node, &pool->head); + + mutex_unlock(&pool->lock); + + return &ret->data[0]; + +fail_alloc: + mutex_unlock(&pool->lock); +fail_pool: + return NULL; +} +EXPORT_SYMBOL(kutf_mempool_alloc); diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c new file mode 100644 index 00000000000000..41645a4af46dcc --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c @@ -0,0 +1,159 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
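The kutf_mem.c pool above is a small region allocator: every kutf_mempool_alloc() is linked into the pool's list and nothing is released until kutf_mempool_destroy() walks that list, which is why the rest of KUTF never pairs pool allocations with frees. A hedged usage sketch (the function name is illustrative only):

/*
 * Illustrative only: typical lifetime of a kutf_mempool. Individual
 * allocations are never freed; the destroy call releases everything.
 */
static int example_pool_usage(void)
{
	struct kutf_mempool pool;
	char *buf;

	if (kutf_mempool_init(&pool))
		return -ENOMEM;

	buf = kutf_mempool_alloc(&pool, 64);
	if (!buf) {
		kutf_mempool_destroy(&pool);
		return -ENOMEM;
	}
	snprintf(buf, 64, "scratch owned by the pool");

	kutf_mempool_destroy(&pool);	/* frees buf along with the pool */
	return 0;
}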
+ * + */ + + + +/* Kernel UTF result management functions */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* Lock to protect all result structures */ +static DEFINE_SPINLOCK(kutf_result_lock); + +struct kutf_result_set *kutf_create_result_set(void) +{ + struct kutf_result_set *set; + + set = kmalloc(sizeof(*set), GFP_KERNEL); + if (!set) { + pr_err("Failed to allocate resultset"); + goto fail_alloc; + } + + INIT_LIST_HEAD(&set->results); + init_waitqueue_head(&set->waitq); + set->flags = 0; + + return set; + +fail_alloc: + return NULL; +} + +int kutf_add_result(struct kutf_context *context, + enum kutf_result_status status, + const char *message) +{ + struct kutf_mempool *mempool = &context->fixture_pool; + struct kutf_result_set *set = context->result_set; + /* Create the new result */ + struct kutf_result *new_result; + + BUG_ON(set == NULL); + + new_result = kutf_mempool_alloc(mempool, sizeof(*new_result)); + if (!new_result) { + pr_err("Result allocation failed\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&new_result->node); + new_result->status = status; + new_result->message = message; + + spin_lock(&kutf_result_lock); + + list_add_tail(&new_result->node, &set->results); + + spin_unlock(&kutf_result_lock); + + wake_up(&set->waitq); + + return 0; +} + +void kutf_destroy_result_set(struct kutf_result_set *set) +{ + if (!list_empty(&set->results)) + pr_err("kutf_destroy_result_set: Unread results from test\n"); + + kfree(set); +} + +static bool kutf_has_result(struct kutf_result_set *set) +{ + bool has_result; + + spin_lock(&kutf_result_lock); + if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT) + /* Pretend there are results if waiting for input */ + has_result = true; + else + has_result = !list_empty(&set->results); + spin_unlock(&kutf_result_lock); + + return has_result; +} + +struct kutf_result *kutf_remove_result(struct kutf_result_set *set) +{ + struct kutf_result *result = NULL; + int ret; + + do { + ret = wait_event_interruptible(set->waitq, + kutf_has_result(set)); + + if (ret) + return ERR_PTR(ret); + + spin_lock(&kutf_result_lock); + + if (!list_empty(&set->results)) { + result = list_first_entry(&set->results, + struct kutf_result, + node); + list_del(&result->node); + } else if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT) { + /* Return a fake result */ + static struct kutf_result waiting = { + .status = KUTF_RESULT_USERDATA_WAIT + }; + result = &waiting; + } + /* If result == NULL then there was a race with the event + * being removed between the check in kutf_has_result and + * the lock being obtained. In this case we retry + */ + + spin_unlock(&kutf_result_lock); + } while (result == NULL); + + return result; +} + +void kutf_set_waiting_for_input(struct kutf_result_set *set) +{ + spin_lock(&kutf_result_lock); + set->flags |= KUTF_RESULT_SET_WAITING_FOR_INPUT; + spin_unlock(&kutf_result_lock); + + wake_up(&set->waitq); +} + +void kutf_clear_waiting_for_input(struct kutf_result_set *set) +{ + spin_lock(&kutf_result_lock); + set->flags &= ~KUTF_RESULT_SET_WAITING_FOR_INPUT; + spin_unlock(&kutf_result_lock); +} diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c new file mode 100644 index 00000000000000..b0c06d5de7f13b --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c @@ -0,0 +1,1244 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* Kernel UTF suite, test and fixture management including user to kernel + * interaction */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if defined(CONFIG_DEBUG_FS) + +/** + * struct kutf_application - Structure which represents kutf application + * @name: The name of this test application. + * @dir: The debugfs directory for this test + * @suite_list: List head to store all the suites which are part of this + * application + */ +struct kutf_application { + const char *name; + struct dentry *dir; + struct list_head suite_list; +}; + +/** + * struct kutf_test_function - Structure which represents kutf test function + * @suite: Back reference to the suite this test function + * belongs to + * @filters: Filters that apply to this test function + * @test_id: Test ID + * @execute: Function to run for this test + * @test_data: Static data for this test + * @node: List node for test_list + * @variant_list: List head to store all the variants which can run on + * this function + * @dir: debugfs directory for this test function + */ +struct kutf_test_function { + struct kutf_suite *suite; + unsigned int filters; + unsigned int test_id; + void (*execute)(struct kutf_context *context); + union kutf_callback_data test_data; + struct list_head node; + struct list_head variant_list; + struct dentry *dir; +}; + +/** + * struct kutf_test_fixture - Structure which holds information on the kutf + * test fixture + * @test_func: Test function this fixture belongs to + * @fixture_index: Index of this fixture + * @node: List node for variant_list + * @dir: debugfs directory for this test fixture + */ +struct kutf_test_fixture { + struct kutf_test_function *test_func; + unsigned int fixture_index; + struct list_head node; + struct dentry *dir; +}; + +static struct dentry *base_dir; +static struct workqueue_struct *kutf_workq; + +/** + * struct kutf_convert_table - Structure which keeps test results + * @result_name: Status of the test result + * @result: Status value for a single test + */ +struct kutf_convert_table { + char result_name[50]; + enum kutf_result_status result; +}; + +struct kutf_convert_table kutf_convert[] = { +#define ADD_UTF_RESULT(_name) \ +{ \ + #_name, \ + _name, \ +}, +ADD_UTF_RESULT(KUTF_RESULT_BENCHMARK) +ADD_UTF_RESULT(KUTF_RESULT_SKIP) +ADD_UTF_RESULT(KUTF_RESULT_UNKNOWN) +ADD_UTF_RESULT(KUTF_RESULT_PASS) +ADD_UTF_RESULT(KUTF_RESULT_DEBUG) +ADD_UTF_RESULT(KUTF_RESULT_INFO) +ADD_UTF_RESULT(KUTF_RESULT_WARN) +ADD_UTF_RESULT(KUTF_RESULT_FAIL) +ADD_UTF_RESULT(KUTF_RESULT_FATAL) +ADD_UTF_RESULT(KUTF_RESULT_ABORT) +}; + +#define UTF_CONVERT_SIZE (ARRAY_SIZE(kutf_convert)) + +/** + * kutf_create_context() - Create a test context in which a specific fixture + * of an application will be run and its results + * reported back to the user + * @test_fix: Test fixture to be run. + * + * The context's refcount will be initialized to 1. 
+ * + * Return: Returns the created test context on success or NULL on failure + */ +static struct kutf_context *kutf_create_context( + struct kutf_test_fixture *test_fix); + +/** + * kutf_destroy_context() - Destroy a previously created test context, only + * once its refcount has become zero + * @kref: pointer to kref member within the context + * + * This should only be used via a kref_put() call on the context's kref member + */ +static void kutf_destroy_context(struct kref *kref); + +/** + * kutf_context_get() - increment refcount on a context + * @context: the kutf context + * + * This must be used when the lifetime of the context might exceed that of the + * thread creating @context + */ +static void kutf_context_get(struct kutf_context *context); + +/** + * kutf_context_put() - decrement refcount on a context, destroying it when it + * reached zero + * @context: the kutf context + * + * This must be used only after a corresponding kutf_context_get() call on + * @context, and the caller no longer needs access to @context. + */ +static void kutf_context_put(struct kutf_context *context); + +/** + * kutf_set_result() - Set the test result against the specified test context + * @context: Test context + * @status: Result status + */ +static void kutf_set_result(struct kutf_context *context, + enum kutf_result_status status); + +/** + * kutf_set_expected_result() - Set the expected test result for the specified + * test context + * @context: Test context + * @expected_status: Expected result status + */ +static void kutf_set_expected_result(struct kutf_context *context, + enum kutf_result_status expected_status); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) +/* Pre 3.4.0 kernels don't have the simple_open helper */ + +/** + * simple_open() - Helper for file opening which stores the inode private data + * into the file private data + * @inode: File entry representation + * @file: A specific opening of the file + * + * Return: always 0; if inode private data do not exist, the file will not + * be assigned private data + */ +static int simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + return 0; +} +#endif + +/** + * kutf_result_to_string() - Converts a KUTF result into a string + * @result_str: Output result string + * @result: Result status to convert + * + * Return: 1 if test result was successfully converted to string, 0 otherwise + */ +static int kutf_result_to_string(char **result_str, + enum kutf_result_status result) +{ + int i; + int ret = 0; + + for (i = 0; i < UTF_CONVERT_SIZE; i++) { + if (result == kutf_convert[i].result) { + *result_str = kutf_convert[i].result_name; + ret = 1; + } + } + return ret; +} + +/** + * kutf_debugfs_const_string_read() - Simple debugfs read callback which + * returns a constant string + * @file: Opened file to read from + * @buf: User buffer to write the data into + * @len: Amount of data to read + * @ppos: Offset into file to read from + * + * Return: On success, the number of bytes read and offset @ppos advanced by + * this number; on error, negative value + */ +static ssize_t kutf_debugfs_const_string_read(struct file *file, + char __user *buf, size_t len, loff_t *ppos) +{ + char *str = file->private_data; + + return simple_read_from_buffer(buf, len, ppos, str, strlen(str)); +} + +static const struct file_operations kutf_debugfs_const_string_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = kutf_debugfs_const_string_read, + .llseek = default_llseek, +}; 
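kutf_debugfs_const_string_read() together with simple_open() gives a read-only debugfs file whose contents are whatever string was stored as the inode's private data. A minimal sketch of the intended wiring, mirroring the "type" entries created further down in this file (the parent dentry and file contents here are illustrative):

/*
 * Sketch of how kutf_debugfs_const_string_ops is wired up. The string
 * literal becomes inode->i_private; simple_open() copies it into
 * file->private_data and the read handler serves it verbatim.
 */
static void example_add_type_file(struct dentry *parent)
{
	debugfs_create_file("type", S_IROTH, parent, "example\n",
			    &kutf_debugfs_const_string_ops);
}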
+ +/** + * kutf_add_explicit_result() - Check if an explicit result needs to be added + * @context: KUTF test context + */ +static void kutf_add_explicit_result(struct kutf_context *context) +{ + switch (context->expected_status) { + case KUTF_RESULT_UNKNOWN: + if (context->status == KUTF_RESULT_UNKNOWN) + kutf_test_pass(context, "(implicit pass)"); + break; + + case KUTF_RESULT_WARN: + if (context->status == KUTF_RESULT_WARN) + kutf_test_pass(context, + "Pass (expected warn occurred)"); + else if (context->status != KUTF_RESULT_SKIP) + kutf_test_fail(context, + "Fail (expected warn missing)"); + break; + + case KUTF_RESULT_FAIL: + if (context->status == KUTF_RESULT_FAIL) + kutf_test_pass(context, + "Pass (expected fail occurred)"); + else if (context->status != KUTF_RESULT_SKIP) { + /* Force the expected status so the fail gets logged */ + context->expected_status = KUTF_RESULT_PASS; + kutf_test_fail(context, + "Fail (expected fail missing)"); + } + break; + + case KUTF_RESULT_FATAL: + if (context->status == KUTF_RESULT_FATAL) + kutf_test_pass(context, + "Pass (expected fatal occurred)"); + else if (context->status != KUTF_RESULT_SKIP) + kutf_test_fail(context, + "Fail (expected fatal missing)"); + break; + + case KUTF_RESULT_ABORT: + if (context->status == KUTF_RESULT_ABORT) + kutf_test_pass(context, + "Pass (expected abort occurred)"); + else if (context->status != KUTF_RESULT_SKIP) + kutf_test_fail(context, + "Fail (expected abort missing)"); + break; + default: + break; + } +} + +static void kutf_run_test(struct work_struct *data) +{ + struct kutf_context *test_context = container_of(data, + struct kutf_context, work); + struct kutf_suite *suite = test_context->suite; + struct kutf_test_function *test_func; + + test_func = test_context->test_fix->test_func; + + /* + * Call the create fixture function if required before the + * fixture is run + */ + if (suite->create_fixture) + test_context->fixture = suite->create_fixture(test_context); + + /* Only run the test if the fixture was created (if required) */ + if ((suite->create_fixture && test_context->fixture) || + (!suite->create_fixture)) { + /* Run this fixture */ + test_func->execute(test_context); + + if (suite->remove_fixture) + suite->remove_fixture(test_context); + + kutf_add_explicit_result(test_context); + } + + kutf_add_result(test_context, KUTF_RESULT_TEST_FINISHED, NULL); + + kutf_context_put(test_context); +} + +/** + * kutf_debugfs_run_open() Debugfs open callback for the "run" entry. + * @inode: inode of the opened file + * @file: Opened file to read from + * + * This function creates a KUTF context and queues it onto a workqueue to be + * run asynchronously. The resulting file descriptor can be used to communicate + * userdata to the test and to read back the results of the test execution. + * + * Return: 0 on success + */ +static int kutf_debugfs_run_open(struct inode *inode, struct file *file) +{ + struct kutf_test_fixture *test_fix = inode->i_private; + struct kutf_context *test_context; + int err = 0; + + test_context = kutf_create_context(test_fix); + if (!test_context) { + err = -ENOMEM; + goto finish; + } + + file->private_data = test_context; + + /* This reference is release by the kutf_run_test */ + kutf_context_get(test_context); + + queue_work(kutf_workq, &test_context->work); + +finish: + return err; +} + +#define USERDATA_WARNING_MESSAGE "WARNING: This test requires userdata\n" + +/** + * kutf_debugfs_run_read() - Debugfs read callback for the "run" entry. 
+ * @file: Opened file to read from + * @buf: User buffer to write the data into + * @len: Amount of data to read + * @ppos: Offset into file to read from + * + * This function emits the results of the test, blocking until they are + * available. + * + * If the test involves user data then this will also return user data records + * to user space. If the test is waiting for user data then this function will + * output a message (to make the likes of 'cat' display it), followed by + * returning 0 to mark the end of file. + * + * Results will be emitted one at a time, once all the results have been read + * 0 will be returned to indicate there is no more data. + * + * Return: Number of bytes read. + */ +static ssize_t kutf_debugfs_run_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct kutf_context *test_context = file->private_data; + struct kutf_result *res; + unsigned long bytes_not_copied; + ssize_t bytes_copied = 0; + char *kutf_str_ptr = NULL; + size_t kutf_str_len = 0; + size_t message_len = 0; + char separator = ':'; + char terminator = '\n'; + + res = kutf_remove_result(test_context->result_set); + + if (IS_ERR(res)) + return PTR_ERR(res); + + /* + * Handle 'fake' results - these results are converted to another + * form before being returned from the kernel + */ + switch (res->status) { + case KUTF_RESULT_TEST_FINISHED: + return 0; + case KUTF_RESULT_USERDATA_WAIT: + if (test_context->userdata.flags & + KUTF_USERDATA_WARNING_OUTPUT) { + /* + * Warning message already output, + * signal end-of-file + */ + return 0; + } + + message_len = sizeof(USERDATA_WARNING_MESSAGE)-1; + if (message_len > len) + message_len = len; + + bytes_not_copied = copy_to_user(buf, + USERDATA_WARNING_MESSAGE, + message_len); + if (bytes_not_copied != 0) + return -EFAULT; + test_context->userdata.flags |= KUTF_USERDATA_WARNING_OUTPUT; + return message_len; + case KUTF_RESULT_USERDATA: + message_len = strlen(res->message); + if (message_len > len-1) { + message_len = len-1; + pr_warn("User data truncated, read not long enough\n"); + } + bytes_not_copied = copy_to_user(buf, res->message, + message_len); + if (bytes_not_copied != 0) { + pr_warn("Failed to copy data to user space buffer\n"); + return -EFAULT; + } + /* Finally the terminator */ + bytes_not_copied = copy_to_user(&buf[message_len], + &terminator, 1); + if (bytes_not_copied != 0) { + pr_warn("Failed to copy data to user space buffer\n"); + return -EFAULT; + } + return message_len+1; + default: + /* Fall through - this is a test result */ + break; + } + + /* Note: This code assumes a result is read completely */ + kutf_result_to_string(&kutf_str_ptr, res->status); + if (kutf_str_ptr) + kutf_str_len = strlen(kutf_str_ptr); + + if (res->message) + message_len = strlen(res->message); + + if ((kutf_str_len + 1 + message_len + 1) > len) { + pr_err("Not enough space in user buffer for a single result"); + return 0; + } + + /* First copy the result string */ + if (kutf_str_ptr) { + bytes_not_copied = copy_to_user(&buf[0], kutf_str_ptr, + kutf_str_len); + bytes_copied += kutf_str_len - bytes_not_copied; + if (bytes_not_copied) + goto exit; + } + + /* Then the separator */ + bytes_not_copied = copy_to_user(&buf[bytes_copied], + &separator, 1); + bytes_copied += 1 - bytes_not_copied; + if (bytes_not_copied) + goto exit; + + /* Finally Next copy the result string */ + if (res->message) { + bytes_not_copied = copy_to_user(&buf[bytes_copied], + res->message, message_len); + bytes_copied += message_len - bytes_not_copied; + if 
(bytes_not_copied) + goto exit; + } + + /* Finally the terminator */ + bytes_not_copied = copy_to_user(&buf[bytes_copied], + &terminator, 1); + bytes_copied += 1 - bytes_not_copied; + +exit: + return bytes_copied; +} + +/** + * kutf_debugfs_run_write() Debugfs write callback for the "run" entry. + * @file: Opened file to write to + * @buf: User buffer to read the data from + * @len: Amount of data to write + * @ppos: Offset into file to write to + * + * This function allows user and kernel to exchange extra data necessary for + * the test fixture. + * + * The data is added to the first struct kutf_context running the fixture + * + * Return: Number of bytes written + */ +static ssize_t kutf_debugfs_run_write(struct file *file, + const char __user *buf, size_t len, loff_t *ppos) +{ + int ret = 0; + struct kutf_context *test_context = file->private_data; + + if (len > KUTF_MAX_LINE_LENGTH) + return -EINVAL; + + ret = kutf_helper_input_enqueue(test_context, buf, len); + if (ret < 0) + return ret; + + return len; +} + +/** + * kutf_debugfs_run_release() - Debugfs release callback for the "run" entry. + * @inode: File entry representation + * @file: A specific opening of the file + * + * Release any resources that were created during the opening of the file + * + * Note that resources may not be released immediately, that might only happen + * later when other users of the kutf_context release their refcount. + * + * Return: 0 on success + */ +static int kutf_debugfs_run_release(struct inode *inode, struct file *file) +{ + struct kutf_context *test_context = file->private_data; + + kutf_helper_input_enqueue_end_of_data(test_context); + + kutf_context_put(test_context); + return 0; +} + +static const struct file_operations kutf_debugfs_run_ops = { + .owner = THIS_MODULE, + .open = kutf_debugfs_run_open, + .read = kutf_debugfs_run_read, + .write = kutf_debugfs_run_write, + .release = kutf_debugfs_run_release, + .llseek = default_llseek, +}; + +/** + * create_fixture_variant() - Creates a fixture variant for the specified + * test function and index and the debugfs entries + * that represent it. 
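The read and write handlers above define the userspace protocol for a fixture's "run" file: each read blocks until it can return one "STATUS:message\n" record (or a user data record, or the userdata warning), writes are parsed as NAME=0x1 or NAME="text" lines, and end of file marks the end of the exchange. Below is a userspace sketch under the usual assumption that debugfs is mounted at /sys/kernel/debug; the path points at the irq_latency test registered at the end of this series of files, which needs no user data, so the example only reads.

/* Userspace sketch of driving a KUTF "run" file; illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/kutf_tests/irq/irq_default/irq_latency/0/run",
		      O_RDONLY);

	if (fd < 0)
		return 1;

	/*
	 * Tests that expect user data would be opened O_RDWR and written
	 * lines such as MY_VALUE=0x10 or MY_STRING="text" at this point.
	 */

	/* Each successful read returns one "STATUS:message\n" record. */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}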
+ * @test_func: Test function + * @fixture_index: Fixture index + * + * Return: 0 on success, negative value corresponding to error code in failure + */ +static int create_fixture_variant(struct kutf_test_function *test_func, + unsigned int fixture_index) +{ + struct kutf_test_fixture *test_fix; + char name[11]; /* Enough to print the MAX_UINT32 + the null terminator */ + struct dentry *tmp; + int err; + + test_fix = kmalloc(sizeof(*test_fix), GFP_KERNEL); + if (!test_fix) { + pr_err("Failed to create debugfs directory when adding fixture\n"); + err = -ENOMEM; + goto fail_alloc; + } + + test_fix->test_func = test_func; + test_fix->fixture_index = fixture_index; + + snprintf(name, sizeof(name), "%d", fixture_index); + test_fix->dir = debugfs_create_dir(name, test_func->dir); + if (!test_func->dir) { + pr_err("Failed to create debugfs directory when adding fixture\n"); + /* Might not be the right error, we don't get it passed back to us */ + err = -EEXIST; + goto fail_dir; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + debugfs_create_file("type", S_IROTH, test_fix->dir, "fixture\n", + &kutf_debugfs_const_string_ops); +#else + tmp = debugfs_create_file("type", S_IROTH, test_fix->dir, "fixture\n", + &kutf_debugfs_const_string_ops); + if (!tmp) { + pr_err("Failed to create debugfs file \"type\" when adding fixture\n"); + /* Might not be the right error, we don't get it passed back to us */ + err = -EEXIST; + goto fail_file; + } +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + debugfs_create_file_unsafe( +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) + tmp = debugfs_create_file_unsafe( +#else + tmp = debugfs_create_file( +#endif + "run", 0600, test_fix->dir, + test_fix, + &kutf_debugfs_run_ops); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) + if (!tmp) { + pr_err("Failed to create debugfs file \"run\" when adding fixture\n"); + /* Might not be the right error, we don't get it passed back to us */ + err = -EEXIST; + goto fail_file; + } +#endif + + list_add(&test_fix->node, &test_func->variant_list); + return 0; + +fail_file: + debugfs_remove_recursive(test_fix->dir); +fail_dir: + kfree(test_fix); +fail_alloc: + return err; +} + +/** + * kutf_remove_test_variant() - Destroy a previously created fixture variant. 
+ * @test_fix: Test fixture + */ +static void kutf_remove_test_variant(struct kutf_test_fixture *test_fix) +{ + debugfs_remove_recursive(test_fix->dir); + kfree(test_fix); +} + +void kutf_add_test_with_filters_and_data( + struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context), + unsigned int filters, + union kutf_callback_data test_data) +{ + struct kutf_test_function *test_func; + struct dentry *tmp; + unsigned int i; + + test_func = kmalloc(sizeof(*test_func), GFP_KERNEL); + if (!test_func) { + pr_err("Failed to allocate memory when adding test %s\n", name); + goto fail_alloc; + } + + INIT_LIST_HEAD(&test_func->variant_list); + + test_func->dir = debugfs_create_dir(name, suite->dir); + if (!test_func->dir) { + pr_err("Failed to create debugfs directory when adding test %s\n", name); + goto fail_dir; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + debugfs_create_file("type", S_IROTH, test_func->dir, "test\n", + &kutf_debugfs_const_string_ops); +#else + tmp = debugfs_create_file("type", S_IROTH, test_func->dir, "test\n", + &kutf_debugfs_const_string_ops); + if (!tmp) { + pr_err("Failed to create debugfs file \"type\" when adding test %s\n", name); + goto fail_file; + } +#endif + + test_func->filters = filters; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + debugfs_create_x32("filters", S_IROTH, test_func->dir, + &test_func->filters); +#else + tmp = debugfs_create_x32("filters", S_IROTH, test_func->dir, + &test_func->filters); + if (!tmp) { + pr_err("Failed to create debugfs file \"filters\" when adding test %s\n", name); + goto fail_file; + } +#endif + + test_func->test_id = id; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + debugfs_create_u32("test_id", S_IROTH, test_func->dir, + &test_func->test_id); +#else + tmp = debugfs_create_u32("test_id", S_IROTH, test_func->dir, + &test_func->test_id); + if (!tmp) { + pr_err("Failed to create debugfs file \"test_id\" when adding test %s\n", name); + goto fail_file; + } +#endif + + for (i = 0; i < suite->fixture_variants; i++) { + if (create_fixture_variant(test_func, i)) { + pr_err("Failed to create fixture %d when adding test %s\n", i, name); + goto fail_file; + } + } + + test_func->suite = suite; + test_func->execute = execute; + test_func->test_data = test_data; + + list_add(&test_func->node, &suite->test_list); + return; + +fail_file: + debugfs_remove_recursive(test_func->dir); +fail_dir: + kfree(test_func); +fail_alloc: + return; +} +EXPORT_SYMBOL(kutf_add_test_with_filters_and_data); + +void kutf_add_test_with_filters( + struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context), + unsigned int filters) +{ + union kutf_callback_data data; + + data.ptr_value = NULL; + + kutf_add_test_with_filters_and_data(suite, + id, + name, + execute, + suite->suite_default_flags, + data); +} +EXPORT_SYMBOL(kutf_add_test_with_filters); + +void kutf_add_test(struct kutf_suite *suite, + unsigned int id, + const char *name, + void (*execute)(struct kutf_context *context)) +{ + union kutf_callback_data data; + + data.ptr_value = NULL; + + kutf_add_test_with_filters_and_data(suite, + id, + name, + execute, + suite->suite_default_flags, + data); +} +EXPORT_SYMBOL(kutf_add_test); + +/** + * kutf_remove_test(): Remove a previously added test function. 
+ * @test_func: Test function + */ +static void kutf_remove_test(struct kutf_test_function *test_func) +{ + struct list_head *pos; + struct list_head *tmp; + + list_for_each_safe(pos, tmp, &test_func->variant_list) { + struct kutf_test_fixture *test_fix; + + test_fix = list_entry(pos, struct kutf_test_fixture, node); + kutf_remove_test_variant(test_fix); + } + + list_del(&test_func->node); + debugfs_remove_recursive(test_func->dir); + kfree(test_func); +} + +struct kutf_suite *kutf_create_suite_with_filters_and_data( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context), + unsigned int filters, + union kutf_callback_data suite_data) +{ + struct kutf_suite *suite; + struct dentry *tmp; + + suite = kmalloc(sizeof(*suite), GFP_KERNEL); + if (!suite) { + pr_err("Failed to allocate memory when creating suite %s\n", name); + goto fail_kmalloc; + } + + suite->dir = debugfs_create_dir(name, app->dir); + if (!suite->dir) { + pr_err("Failed to create debugfs directory when adding test %s\n", name); + goto fail_debugfs; + } + + tmp = debugfs_create_file("type", S_IROTH, suite->dir, "suite\n", + &kutf_debugfs_const_string_ops); + if (!tmp) { + pr_err("Failed to create debugfs file \"type\" when adding test %s\n", name); + goto fail_file; + } + + INIT_LIST_HEAD(&suite->test_list); + suite->app = app; + suite->name = name; + suite->fixture_variants = fixture_count; + suite->create_fixture = create_fixture; + suite->remove_fixture = remove_fixture; + suite->suite_default_flags = filters; + suite->suite_data = suite_data; + + list_add(&suite->node, &app->suite_list); + + return suite; + +fail_file: + debugfs_remove_recursive(suite->dir); +fail_debugfs: + kfree(suite); +fail_kmalloc: + return NULL; +} +EXPORT_SYMBOL(kutf_create_suite_with_filters_and_data); + +struct kutf_suite *kutf_create_suite_with_filters( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context), + unsigned int filters) +{ + union kutf_callback_data data; + + data.ptr_value = NULL; + return kutf_create_suite_with_filters_and_data(app, + name, + fixture_count, + create_fixture, + remove_fixture, + filters, + data); +} +EXPORT_SYMBOL(kutf_create_suite_with_filters); + +struct kutf_suite *kutf_create_suite( + struct kutf_application *app, + const char *name, + unsigned int fixture_count, + void *(*create_fixture)(struct kutf_context *context), + void (*remove_fixture)(struct kutf_context *context)) +{ + union kutf_callback_data data; + + data.ptr_value = NULL; + return kutf_create_suite_with_filters_and_data(app, + name, + fixture_count, + create_fixture, + remove_fixture, + KUTF_F_TEST_GENERIC, + data); +} +EXPORT_SYMBOL(kutf_create_suite); + +/** + * kutf_destroy_suite() - Destroy a previously added test suite. 
+ * @suite: Test suite + */ +static void kutf_destroy_suite(struct kutf_suite *suite) +{ + struct list_head *pos; + struct list_head *tmp; + + list_for_each_safe(pos, tmp, &suite->test_list) { + struct kutf_test_function *test_func; + + test_func = list_entry(pos, struct kutf_test_function, node); + kutf_remove_test(test_func); + } + + list_del(&suite->node); + debugfs_remove_recursive(suite->dir); + kfree(suite); +} + +struct kutf_application *kutf_create_application(const char *name) +{ + struct kutf_application *app; + struct dentry *tmp; + + app = kmalloc(sizeof(*app), GFP_KERNEL); + if (!app) { + pr_err("Failed to create allocate memory when creating application %s\n", name); + goto fail_kmalloc; + } + + app->dir = debugfs_create_dir(name, base_dir); + if (!app->dir) { + pr_err("Failed to create debugfs direcotry when creating application %s\n", name); + goto fail_debugfs; + } + + tmp = debugfs_create_file("type", S_IROTH, app->dir, "application\n", + &kutf_debugfs_const_string_ops); + if (!tmp) { + pr_err("Failed to create debugfs file \"type\" when creating application %s\n", name); + goto fail_file; + } + + INIT_LIST_HEAD(&app->suite_list); + app->name = name; + + return app; + +fail_file: + debugfs_remove_recursive(app->dir); +fail_debugfs: + kfree(app); +fail_kmalloc: + return NULL; +} +EXPORT_SYMBOL(kutf_create_application); + +void kutf_destroy_application(struct kutf_application *app) +{ + struct list_head *pos; + struct list_head *tmp; + + list_for_each_safe(pos, tmp, &app->suite_list) { + struct kutf_suite *suite; + + suite = list_entry(pos, struct kutf_suite, node); + kutf_destroy_suite(suite); + } + + debugfs_remove_recursive(app->dir); + kfree(app); +} +EXPORT_SYMBOL(kutf_destroy_application); + +static struct kutf_context *kutf_create_context( + struct kutf_test_fixture *test_fix) +{ + struct kutf_context *new_context; + + new_context = kmalloc(sizeof(*new_context), GFP_KERNEL); + if (!new_context) { + pr_err("Failed to allocate test context"); + goto fail_alloc; + } + + new_context->result_set = kutf_create_result_set(); + if (!new_context->result_set) { + pr_err("Failed to create result set"); + goto fail_result_set; + } + + new_context->test_fix = test_fix; + /* Save the pointer to the suite as the callbacks will require it */ + new_context->suite = test_fix->test_func->suite; + new_context->status = KUTF_RESULT_UNKNOWN; + new_context->expected_status = KUTF_RESULT_UNKNOWN; + + kutf_mempool_init(&new_context->fixture_pool); + new_context->fixture = NULL; + new_context->fixture_index = test_fix->fixture_index; + new_context->fixture_name = NULL; + new_context->test_data = test_fix->test_func->test_data; + + new_context->userdata.flags = 0; + INIT_LIST_HEAD(&new_context->userdata.input_head); + init_waitqueue_head(&new_context->userdata.input_waitq); + + INIT_WORK(&new_context->work, kutf_run_test); + + kref_init(&new_context->kref); + + return new_context; + +fail_result_set: + kfree(new_context); +fail_alloc: + return NULL; +} + +static void kutf_destroy_context(struct kref *kref) +{ + struct kutf_context *context; + + context = container_of(kref, struct kutf_context, kref); + kutf_destroy_result_set(context->result_set); + kutf_mempool_destroy(&context->fixture_pool); + kfree(context); +} + +static void kutf_context_get(struct kutf_context *context) +{ + kref_get(&context->kref); +} + +static void kutf_context_put(struct kutf_context *context) +{ + kref_put(&context->kref, kutf_destroy_context); +} + + +static void kutf_set_result(struct kutf_context *context, + enum 
kutf_result_status status) +{ + context->status = status; +} + +static void kutf_set_expected_result(struct kutf_context *context, + enum kutf_result_status expected_status) +{ + context->expected_status = expected_status; +} + +/** + * kutf_test_log_result() - Log a result for the specified test context + * @context: Test context + * @message: Result string + * @new_status: Result status + */ +static void kutf_test_log_result( + struct kutf_context *context, + const char *message, + enum kutf_result_status new_status) +{ + if (context->status < new_status) + context->status = new_status; + + if (context->expected_status != new_status) + kutf_add_result(context, new_status, message); +} + +void kutf_test_log_result_external( + struct kutf_context *context, + const char *message, + enum kutf_result_status new_status) +{ + kutf_test_log_result(context, message, new_status); +} +EXPORT_SYMBOL(kutf_test_log_result_external); + +void kutf_test_expect_abort(struct kutf_context *context) +{ + kutf_set_expected_result(context, KUTF_RESULT_ABORT); +} +EXPORT_SYMBOL(kutf_test_expect_abort); + +void kutf_test_expect_fatal(struct kutf_context *context) +{ + kutf_set_expected_result(context, KUTF_RESULT_FATAL); +} +EXPORT_SYMBOL(kutf_test_expect_fatal); + +void kutf_test_expect_fail(struct kutf_context *context) +{ + kutf_set_expected_result(context, KUTF_RESULT_FAIL); +} +EXPORT_SYMBOL(kutf_test_expect_fail); + +void kutf_test_expect_warn(struct kutf_context *context) +{ + kutf_set_expected_result(context, KUTF_RESULT_WARN); +} +EXPORT_SYMBOL(kutf_test_expect_warn); + +void kutf_test_expect_pass(struct kutf_context *context) +{ + kutf_set_expected_result(context, KUTF_RESULT_PASS); +} +EXPORT_SYMBOL(kutf_test_expect_pass); + +void kutf_test_skip(struct kutf_context *context) +{ + kutf_set_result(context, KUTF_RESULT_SKIP); + kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN); + + kutf_test_log_result(context, "Test skipped", KUTF_RESULT_SKIP); +} +EXPORT_SYMBOL(kutf_test_skip); + +void kutf_test_skip_msg(struct kutf_context *context, const char *message) +{ + kutf_set_result(context, KUTF_RESULT_SKIP); + kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN); + + kutf_test_log_result(context, kutf_dsprintf(&context->fixture_pool, + "Test skipped: %s", message), KUTF_RESULT_SKIP); + kutf_test_log_result(context, "!!!Test skipped!!!", KUTF_RESULT_SKIP); +} +EXPORT_SYMBOL(kutf_test_skip_msg); + +void kutf_test_debug(struct kutf_context *context, char const *message) +{ + kutf_test_log_result(context, message, KUTF_RESULT_DEBUG); +} +EXPORT_SYMBOL(kutf_test_debug); + +void kutf_test_pass(struct kutf_context *context, char const *message) +{ + static const char explicit_message[] = "(explicit pass)"; + + if (!message) + message = explicit_message; + + kutf_test_log_result(context, message, KUTF_RESULT_PASS); +} +EXPORT_SYMBOL(kutf_test_pass); + +void kutf_test_info(struct kutf_context *context, char const *message) +{ + kutf_test_log_result(context, message, KUTF_RESULT_INFO); +} +EXPORT_SYMBOL(kutf_test_info); + +void kutf_test_warn(struct kutf_context *context, char const *message) +{ + kutf_test_log_result(context, message, KUTF_RESULT_WARN); +} +EXPORT_SYMBOL(kutf_test_warn); + +void kutf_test_fail(struct kutf_context *context, char const *message) +{ + kutf_test_log_result(context, message, KUTF_RESULT_FAIL); +} +EXPORT_SYMBOL(kutf_test_fail); + +void kutf_test_fatal(struct kutf_context *context, char const *message) +{ + kutf_test_log_result(context, message, KUTF_RESULT_FATAL); +} 
+EXPORT_SYMBOL(kutf_test_fatal); + +void kutf_test_abort(struct kutf_context *context) +{ + kutf_test_log_result(context, "", KUTF_RESULT_ABORT); +} +EXPORT_SYMBOL(kutf_test_abort); + +/** + * init_kutf_core() - Module entry point. + * + * Create the base entry point in debugfs. + */ +static int __init init_kutf_core(void) +{ + kutf_workq = alloc_workqueue("kutf workq", WQ_UNBOUND, 1); + if (!kutf_workq) + return -ENOMEM; + + base_dir = debugfs_create_dir("kutf_tests", NULL); + if (!base_dir) { + destroy_workqueue(kutf_workq); + kutf_workq = NULL; + return -ENOMEM; + } + + return 0; +} + +/** + * exit_kutf_core() - Module exit point. + * + * Remove the base entry point in debugfs. + */ +static void __exit exit_kutf_core(void) +{ + debugfs_remove_recursive(base_dir); + + if (kutf_workq) + destroy_workqueue(kutf_workq); +} + +#else /* defined(CONFIG_DEBUG_FS) */ + +/** + * init_kutf_core() - Module entry point. + * + * Stub for when build against a kernel without debugfs support + */ +static int __init init_kutf_core(void) +{ + pr_debug("KUTF requires a kernel with debug fs support"); + + return -ENODEV; +} + +/** + * exit_kutf_core() - Module exit point. + * + * Stub for when build against a kernel without debugfs support + */ +static void __exit exit_kutf_core(void) +{ +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +MODULE_LICENSE("GPL"); + +module_init(init_kutf_core); +module_exit(exit_kutf_core); diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c new file mode 100644 index 00000000000000..a429a2dbf7880e --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c @@ -0,0 +1,71 @@ +/* + * + * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +/* Kernel UTF utility functions */ + +#include +#include +#include +#include + +#include +#include + +static char tmp_buffer[KUTF_MAX_DSPRINTF_LEN]; + +DEFINE_MUTEX(buffer_lock); + +const char *kutf_dsprintf(struct kutf_mempool *pool, + const char *fmt, ...) +{ + va_list args; + int len; + int size; + void *buffer; + + mutex_lock(&buffer_lock); + va_start(args, fmt); + len = vsnprintf(tmp_buffer, sizeof(tmp_buffer), fmt, args); + va_end(args); + + if (len < 0) { + pr_err("kutf_dsprintf: Bad format dsprintf format %s\n", fmt); + goto fail_format; + } + + if (len >= sizeof(tmp_buffer)) { + pr_warn("kutf_dsprintf: Truncated dsprintf message %s\n", fmt); + size = sizeof(tmp_buffer); + } else { + size = len + 1; + } + + buffer = kutf_mempool_alloc(pool, size); + if (!buffer) + goto fail_alloc; + + memcpy(buffer, tmp_buffer, size); + mutex_unlock(&buffer_lock); + + return buffer; + +fail_alloc: +fail_format: + mutex_unlock(&buffer_lock); + return NULL; +} +EXPORT_SYMBOL(kutf_dsprintf); diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild new file mode 100644 index 00000000000000..0cd9cebe9d8bd7 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild @@ -0,0 +1,20 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. 
+# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +ccflags-y += -I$(src)/../include -I$(src)/../../../ -I$(src)/../../ -I$(src)/../../backend/gpu -I$(srctree)/drivers/staging/android + +obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test.o + +mali_kutf_irq_test-y := mali_kutf_irq_test_main.o diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig new file mode 100644 index 00000000000000..f4553d3aa08ab2 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig @@ -0,0 +1,23 @@ +# +# (C) COPYRIGHT 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# + + +config MALI_IRQ_LATENCY + tristate "Mali GPU IRQ latency measurement" + depends on MALI_MIDGARD && MALI_DEBUG && MALI_KUTF + default m + help + This option will build a test module mali_kutf_irq_test that + can determine the latency of the Mali GPU IRQ on your system. + Choosing M here will generate a single module called mali_kutf_irq_test. diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile new file mode 100644 index 00000000000000..2ac4f97aa397a9 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile @@ -0,0 +1,47 @@ +# +# (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained +# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# + + +# linux build system bootstrap for out-of-tree module + +# default to building for the host +ARCH ?= $(shell uname -m) + +ifeq ($(KDIR),) +$(error Must specify KDIR to point to the kernel to target)) +endif + +TEST_CCFLAGS := \ + -DMALI_DEBUG=$(MALI_DEBUG) \ + -DMALI_BACKEND_KERNEL=$(MALI_BACKEND_KERNEL) \ + -DMALI_NO_MALI=$(MALI_NO_MALI) \ + -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \ + -DMALI_USE_UMP=$(MALI_USE_UMP) \ + -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \ + -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \ + $(SCONS_CFLAGS) \ + -I$(CURDIR)/../include \ + -I$(CURDIR)/../../../../../../include \ + -I$(CURDIR)/../../../ \ + -I$(CURDIR)/../../ \ + -I$(CURDIR)/../../backend/gpu \ + -I$(CURDIR)/ \ + -I$(srctree)/drivers/staging/android \ + -I$(srctree)/include/linux + +all: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS="$(TEST_CCFLAGS)" KBUILD_EXTRA_SYMBOLS="$(CURDIR)/../kutf/Module.symvers $(CURDIR)/../../Module.symvers" modules + +clean: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c new file mode 100644 index 00000000000000..9834efd94a0491 --- /dev/null +++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c @@ -0,0 +1,280 @@ +/* + * + * (C) COPYRIGHT 2016, 2017 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained + * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + + + +#include +#include +#include + +#include "mali_kbase.h" +#include + +#include +#include + +/* + * This file contains the code which is used for measuring interrupt latency + * of the Mali GPU IRQ. In particular, function mali_kutf_irq_latency() is + * used with this purpose and it is called within KUTF framework - a kernel + * unit test framework. The measured latency provided by this test should + * be representative for the latency of the Mali JOB/MMU IRQs as well. + */ + +/* KUTF test application pointer for this test */ +struct kutf_application *irq_app; + +/** + * struct kutf_irq_fixture data - test fixture used by the test functions. + * @kbdev: kbase device for the GPU. + * + */ +struct kutf_irq_fixture_data { + struct kbase_device *kbdev; +}; + +#define SEC_TO_NANO(s) ((s)*1000000000LL) + +/* ID for the GPU IRQ */ +#define GPU_IRQ_HANDLER 2 + +#define NR_TEST_IRQS 1000000 + +/* IRQ for the test to trigger. Currently MULTIPLE_GPU_FAULTS as we would not + * expect to see this in normal use (e.g., when Android is running). 
*/ +#define TEST_IRQ MULTIPLE_GPU_FAULTS + +#define IRQ_TIMEOUT HZ + +/* Kernel API for setting irq throttle hook callback and irq time in us*/ +extern int kbase_set_custom_irq_handler(struct kbase_device *kbdev, + irq_handler_t custom_handler, + int irq_type); +extern irqreturn_t kbase_gpu_irq_handler(int irq, void *data); + +static DECLARE_WAIT_QUEUE_HEAD(wait); +static bool triggered; +static u64 irq_time; + +static void *kbase_untag(void *ptr) +{ + return (void *)(((uintptr_t) ptr) & ~3); +} + +/** + * kbase_gpu_irq_custom_handler - Custom IRQ throttle handler + * @irq: IRQ number + * @data: Data associated with this IRQ + * + * Return: state of the IRQ + */ +static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data) +{ + struct kbase_device *kbdev = kbase_untag(data); + u32 val; + + val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL); + if (val & TEST_IRQ) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 tval; + + ktime_get_real_ts64(&tval); +#else + struct timespec tval; + + getnstimeofday(&tval); +#endif + irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec); + + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, + NULL); + + triggered = true; + wake_up(&wait); + + return IRQ_HANDLED; + } + + /* Trigger main irq handler */ + return kbase_gpu_irq_handler(irq, data); +} + +/** + * mali_kutf_irq_default_create_fixture() - Creates the fixture data required + * for all the tests in the irq suite. + * @context: KUTF context. + * + * Return: Fixture data created on success or NULL on failure + */ +static void *mali_kutf_irq_default_create_fixture( + struct kutf_context *context) +{ + struct kutf_irq_fixture_data *data; + + data = kutf_mempool_alloc(&context->fixture_pool, + sizeof(struct kutf_irq_fixture_data)); + + if (!data) + goto fail; + + /* Acquire the kbase device */ + data->kbdev = kbase_find_device(-1); + if (data->kbdev == NULL) { + kutf_test_fail(context, "Failed to find kbase device"); + goto fail; + } + + return data; + +fail: + return NULL; +} + +/** + * mali_kutf_irq_default_remove_fixture() - Destroy fixture data previously + * created by mali_kutf_irq_default_create_fixture. + * + * @context: KUTF context. + */ +static void mali_kutf_irq_default_remove_fixture( + struct kutf_context *context) +{ + struct kutf_irq_fixture_data *data = context->fixture; + struct kbase_device *kbdev = data->kbdev; + + kbase_release_device(kbdev); +} + +/** + * mali_kutf_irq_latency() - measure GPU IRQ latency + * @context: kutf context within which to perform the test + * + * The test triggers IRQs manually, and measures the + * time between triggering the IRQ and the IRQ handler being executed. + * + * This is not a traditional test, in that the pass/fail status has little + * meaning (other than indicating that the IRQ handler executed at all). Instead + * the results are in the latencies provided with the test result. There is no + * meaningful pass/fail result that can be obtained here, instead the latencies + * are provided for manual analysis only. 
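+ *
+ * Each iteration records a start timestamp, writes TEST_IRQ into
+ * GPU_IRQ_RAWSTAT and waits up to IRQ_TIMEOUT for the custom handler to
+ * time-stamp the interrupt; the minimum, maximum and average of the
+ * NR_TEST_IRQS deltas are reported in nanoseconds.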
+ */ +static void mali_kutf_irq_latency(struct kutf_context *context) +{ + struct kutf_irq_fixture_data *data = context->fixture; + struct kbase_device *kbdev = data->kbdev; + u64 min_time = U64_MAX, max_time = 0, average_time = 0; + int i; + bool test_failed = false; + + /* Force GPU to be powered */ + kbase_pm_context_active(kbdev); + + kbase_set_custom_irq_handler(kbdev, kbase_gpu_irq_custom_handler, + GPU_IRQ_HANDLER); + + for (i = 0; i < NR_TEST_IRQS; i++) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) + struct timespec64 tval; + ktime_get_real_ts64(&tval); +#else + struct timespec tval; + getnstimeofday(&tval); +#endif + u64 start_time; + int ret; + + triggered = false; + start_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec); + + /* Trigger fake IRQ */ + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), + TEST_IRQ, NULL); + + ret = wait_event_timeout(wait, triggered != false, IRQ_TIMEOUT); + + if (ret == 0) { + kutf_test_fail(context, "Timed out waiting for IRQ\n"); + test_failed = true; + break; + } + + if ((irq_time - start_time) < min_time) + min_time = irq_time - start_time; + if ((irq_time - start_time) > max_time) + max_time = irq_time - start_time; + average_time += irq_time - start_time; + + udelay(10); + } + + /* Go back to default handler */ + kbase_set_custom_irq_handler(kbdev, NULL, GPU_IRQ_HANDLER); + + kbase_pm_context_idle(kbdev); + + if (!test_failed) { + const char *results; + + do_div(average_time, NR_TEST_IRQS); + results = kutf_dsprintf(&context->fixture_pool, + "Min latency = %lldns, Max latency = %lldns, Average latency = %lldns\n", + min_time, max_time, average_time); + kutf_test_pass(context, results); + } +} + +/** + * Module entry point for this test. + */ +int mali_kutf_irq_test_main_init(void) +{ + struct kutf_suite *suite; + + irq_app = kutf_create_application("irq"); + + if (NULL == irq_app) { + pr_warn("Creation of test application failed!\n"); + return -ENOMEM; + } + + suite = kutf_create_suite(irq_app, "irq_default", + 1, mali_kutf_irq_default_create_fixture, + mali_kutf_irq_default_remove_fixture); + + if (NULL == suite) { + pr_warn("Creation of test suite failed!\n"); + kutf_destroy_application(irq_app); + return -ENOMEM; + } + + kutf_add_test(suite, 0x0, "irq_latency", + mali_kutf_irq_latency); + return 0; +} + +/** + * Module exit point for this test. 
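+ *
+ * Tears down the KUTF application created by mali_kutf_irq_test_main_init().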
+ */ +void mali_kutf_irq_test_main_exit(void) +{ + kutf_destroy_application(irq_app); +} + +module_init(mali_kutf_irq_test_main_init); +module_exit(mali_kutf_irq_test_main_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("ARM Ltd."); +MODULE_VERSION("1.0"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 30c6b9edddb506..0f7749e9424d41 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2278,8 +2278,7 @@ void amdgpu_dm_update_connector_after_detect( drm_connector_update_edid_property(connector, aconnector->edid); - aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid); - drm_connector_list_update(connector); + drm_add_edid_modes(connector, aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 6b431db146cd97..1c6e401dd4ccee 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -704,24 +704,24 @@ static struct wm_table ddr4_wm_table_rn = { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 11.12, + .sr_enter_plus_exit_time_us = 12.48, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 11.12, + .sr_enter_plus_exit_time_us = 12.48, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 11.12, + .sr_enter_plus_exit_time_us = 12.48, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index b409f6b2bfd832..210466b2d8631f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .disable_hpd = dce110_link_encoder_disable_hpd, .is_dig_enabled = dce110_is_dig_enabled, .destroy = dce110_link_encoder_destroy, - .get_max_link_cap = dce110_link_encoder_get_max_link_cap + .get_max_link_cap = dce110_link_encoder_get_max_link_cap, + .get_dig_frontend = dce110_get_dig_frontend, }; static enum bp_result link_transmitter_control( @@ -235,6 +236,44 @@ static void set_link_training_complete( } +unsigned int dce110_get_dig_frontend(struct link_encoder *enc) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + u32 value; + enum engine_id result; + + REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value); + + switch (value) { + case DCE110_DIG_FE_SOURCE_SELECT_DIGA: + result = ENGINE_ID_DIGA; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGB: + result = ENGINE_ID_DIGB; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGC: + result = ENGINE_ID_DIGC; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGD: + result = ENGINE_ID_DIGD; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGE: + result = ENGINE_ID_DIGE; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGF: + result = ENGINE_ID_DIGF; + break; + case DCE110_DIG_FE_SOURCE_SELECT_DIGG: + result = ENGINE_ID_DIGG; + break; + default: + // 
invalid source select DIG + result = ENGINE_ID_UNKNOWN; + } + + return result; +} + void dce110_link_encoder_set_dp_phy_pattern_training_pattern( struct link_encoder *enc, uint32_t index) @@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = { .disable_hpd = dce110_link_encoder_disable_hpd, .is_dig_enabled = dce110_is_dig_enabled, .destroy = dce110_link_encoder_destroy, - .get_max_link_cap = dce110_link_encoder_get_max_link_cap + .get_max_link_cap = dce110_link_encoder_get_max_link_cap, + .get_dig_frontend = dce110_get_dig_frontend }; void dce60_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index cb714a48b171c3..fc6ade824c231a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe( enum engine_id engine, bool connect); +unsigned int dce110_get_dig_frontend(struct link_encoder *enc); + void dce110_link_encoder_set_dp_phy_pattern_training_pattern( struct link_encoder *enc, uint32_t index); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0c79a9ba48bb64..eec4f94ac3daa0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -971,7 +971,7 @@ static int is_color_space_conversion(struct dw_hdmi *hdmi) is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format); return (is_input_rgb != is_output_rgb) || - (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range); + hdmi_data->rgb_limited_range; } static int is_color_space_decimation(struct dw_hdmi *hdmi) @@ -1026,8 +1026,7 @@ static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) else csc_coeff = &csc_coeff_rgb_in_eitu709; csc_scale = 0; - } else if (is_input_rgb && is_output_rgb && - hdmi->hdmi_data.rgb_limited_range) { + } else if (hdmi->hdmi_data.rgb_limited_range) { csc_coeff = &csc_coeff_rgb_full_to_rgb_limited; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index e08684e34078ab..91b37b76618d20 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2622,11 +2622,22 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +/* + * Display WA #22010492432: tgl + * Program half of the nominal DCO divider fraction value. 
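+ *
+ * The workaround only applies to Tigerlake parts running from a 38.4 MHz
+ * reference clock; the frequency readback path doubles the fraction again,
+ * so the reported DCO frequency is unchanged.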
+ */ +static bool +tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) +{ + return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400; +} + static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, int ref_clock) { const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 dco_fraction; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; @@ -2669,8 +2680,13 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; - dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; + dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT; + + if (tgl_combo_pll_div_frac_wa_needed(dev_priv)) + dco_fraction *= 2; + + dco_freq += (dco_fraction * ref_clock) / 0x8000; if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0)) return 0; @@ -2948,16 +2964,6 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { /* the following params are unused */ }; -/* - * Display WA #22010492432: tgl - * Divide the nominal .dco_fraction value by 2. - */ -static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = { - .dco_integer = 0x54, .dco_fraction = 0x1800, - /* the following params are unused */ - .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, -}; - static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { @@ -2991,14 +2997,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, MISSING_CASE(dev_priv->dpll.ref_clks.nssc); fallthrough; case 19200: + case 38400: *pll_params = tgl_tbt_pll_19_2MHz_values; break; case 24000: *pll_params = tgl_tbt_pll_24MHz_values; break; - case 38400: - *pll_params = tgl_tbt_pll_38_4MHz_values; - break; } } else { switch (dev_priv->dpll.ref_clks.nssc) { @@ -3065,9 +3069,14 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, const struct skl_wrpll_params *pll_params, struct intel_dpll_hw_state *pll_state) { + u32 dco_fraction = pll_params->dco_fraction; + memset(pll_state, 0, sizeof(*pll_state)); - pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) | + if (tgl_combo_pll_div_frac_wa_needed(i915)) + dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); + + pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | pll_params->dco_integer; pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index aede0c67a57f09..1287444b1bc83f 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -425,9 +425,12 @@ void meson_viu_init(struct meson_drm *priv) if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) meson_viu_load_matrix(priv); +#if 0 + /* FIXME: */ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff, true); +#endif /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 1c6b78ad5ade46..b61bf53ec07afc 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master, ret = i3c_master_bus_init(master); if (ret) - goto err_put_dev; + goto 
err_destroy_wq; ret = device_add(&master->dev); if (ret) @@ -2568,6 +2568,9 @@ int i3c_master_register(struct i3c_master_controller *master, err_cleanup_bus: i3c_master_bus_cleanup(master); +err_destroy_wq: + destroy_workqueue(master->wq); + err_put_dev: put_device(&master->dev); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 4a041511b70ec2..76b9c436edcd2f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1177,25 +1177,6 @@ static int assign_name(struct ib_device *device, const char *name) return ret; } -static void setup_dma_device(struct ib_device *device, - struct device *dma_device) -{ - /* - * If the caller does not provide a DMA capable device then the IB - * device will be used. In this case the caller should fully setup the - * ibdev for DMA. This usually means using dma_virt_ops. - */ -#ifdef CONFIG_DMA_VIRT_OPS - if (!dma_device) { - device->dev.dma_ops = &dma_virt_ops; - dma_device = &device->dev; - } -#endif - WARN_ON(!dma_device); - device->dma_device = dma_device; - WARN_ON(!device->dma_device->dma_parms); -} - /* * setup_device() allocates memory and sets up data that requires calling the * device ops, this is the only reason these actions are not done during @@ -1341,7 +1322,14 @@ int ib_register_device(struct ib_device *device, const char *name, if (ret) return ret; - setup_dma_device(device, dma_device); + /* + * If the caller does not provide a DMA capable device then the IB core + * will set up ib_sge and scatterlist structures that stash the kernel + * virtual address into the address field. + */ + WARN_ON(dma_device && !dma_device->dma_parms); + device->dma_device = dma_device; + ret = setup_device(device); if (ret) return ret; @@ -2676,6 +2664,21 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) } EXPORT_SYMBOL(ib_set_device_ops); +#ifdef CONFIG_INFINIBAND_VIRT_DMA +int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + sg_dma_address(s) = (uintptr_t)sg_virt(s); + sg_dma_len(s) = s->length; + } + return nents; +} +EXPORT_SYMBOL(ib_dma_virt_map_sg); +#endif /* CONFIG_INFINIBAND_VIRT_DMA */ + static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 13f43ab7220b05..a96030b784eb21 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -285,8 +285,11 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg, static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir) { - if (is_pci_p2pdma_page(sg_page(sg))) + if (is_pci_p2pdma_page(sg_page(sg))) { + if (WARN_ON_ONCE(ib_uses_virt_dma(dev))) + return 0; return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir); + } return ib_dma_map_sg(dev, sg, sg_cnt, dir); } diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig index c8e268082952b0..0df48b3a6b56c5 100644 --- a/drivers/infiniband/sw/rdmavt/Kconfig +++ b/drivers/infiniband/sw/rdmavt/Kconfig @@ -4,6 +4,5 @@ config INFINIBAND_RDMAVT depends on INFINIBAND_VIRT_DMA depends on X86_64 depends on PCI - select DMA_VIRT_OPS help This is a common software verbs provider for RDMA networks. 
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 8490fdb9c91e50..90fc234f489acd 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -324,8 +324,6 @@ static void __rvt_free_mr(struct rvt_mr *mr) * @acc: access flags * * Return: the memory region on success, otherwise returns an errno. - * Note that all DMA addresses should be created via the functions in - * struct dma_virt_ops. */ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) { @@ -766,7 +764,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, /* * We use LKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr() and dma_virt_ops). + * (see rvt_get_dma_mr()). */ if (sge->lkey == 0) { struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device); @@ -877,7 +875,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, /* * We use RKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr() and dma_virt_ops). + * (see rvt_get_dma_mr()). */ rcu_read_lock(); if (rkey == 0) { diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 670a9623b46e11..d1bbe66610cfe4 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -524,7 +524,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) int rvt_register_device(struct rvt_dev_info *rdi) { int ret = 0, i; - u64 dma_mask; if (!rdi) return -EINVAL; @@ -579,13 +578,6 @@ int rvt_register_device(struct rvt_dev_info *rdi) /* Completion queues */ spin_lock_init(&rdi->n_cqs_lock); - /* DMA Operations */ - rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms; - dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32); - ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask); - if (ret) - goto bail_wss; - /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); rdi->n_pds_allocated = 0; diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 8810bfa680495a..4521490667925f 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -5,7 +5,6 @@ config RDMA_RXE depends on INFINIBAND_VIRT_DMA select NET_UDP_TUNNEL select CRYPTO_CRC32 - select DMA_VIRT_OPS help This driver implements the InfiniBand RDMA transport over the Linux network stack. 
It enables a system with a diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 34bef7d8e6b41b..943914c2a50c70 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -20,18 +20,6 @@ static struct rxe_recv_sockets recv_sockets; -struct device *rxe_dma_device(struct rxe_dev *rxe) -{ - struct net_device *ndev; - - ndev = rxe->ndev; - - if (is_vlan_dev(ndev)) - ndev = vlan_dev_real_dev(ndev); - - return ndev->dev.parent; -} - int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) { int err; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index f9c832e82552f9..512868c2302383 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1118,23 +1118,15 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) int err; struct ib_device *dev = &rxe->ib_dev; struct crypto_shash *tfm; - u64 dma_mask; strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc)); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = num_possible_cpus(); - dev->dev.parent = rxe_dma_device(rxe); dev->local_dma_lkey = 0; addrconf_addr_eui48((unsigned char *)&dev->node_guid, rxe->ndev->dev_addr); - dev->dev.dma_parms = &rxe->dma_parms; - dma_set_max_seg_size(&dev->dev, UINT_MAX); - dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32); - err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask); - if (err) - return err; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 3414b341b7091f..4bf5d85a1ab3ce 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -352,7 +352,6 @@ struct rxe_port { struct rxe_dev { struct ib_device ib_dev; struct ib_device_attr attr; - struct device_dma_parameters dma_parms; int max_ucontext; int max_inline_data; struct mutex usdev_lock; diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig index 3450ba5081df51..1b5105cbabaeed 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -2,7 +2,6 @@ config RDMA_SIW tristate "Software RDMA over TCP/IP (iWARP) driver" depends on INET && INFINIBAND && LIBCRC32C depends on INFINIBAND_VIRT_DMA - select DMA_VIRT_OPS help This driver implements the iWARP RDMA transport over the Linux TCP/IP network stack. It enables a system with a diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index e9753831ac3f33..adda7899621962 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -69,7 +69,6 @@ struct siw_pd { struct siw_device { struct ib_device base_dev; - struct device_dma_parameters dma_parms; struct net_device *netdev; struct siw_dev_cap attrs; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 181e06c1c43d7e..9d152e198a59bf 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -305,25 +305,8 @@ static struct siw_device *siw_device_create(struct net_device *netdev) { struct siw_device *sdev = NULL; struct ib_device *base_dev; - struct device *parent = netdev->dev.parent; - u64 dma_mask; int rv; - if (!parent) { - /* - * The loopback device has no parent device, - * so it appears as a top-level device. 
To support - * loopback device connectivity, take this device - * as the parent device. Skip all other devices - * w/o parent device. - */ - if (netdev->type != ARPHRD_LOOPBACK) { - pr_warn("siw: device %s error: no parent device\n", - netdev->name); - return NULL; - } - parent = &netdev->dev; - } sdev = ib_alloc_device(siw_device, base_dev); if (!sdev) return NULL; @@ -382,13 +365,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev) * per physical port. */ base_dev->phys_port_cnt = 1; - base_dev->dev.parent = parent; - base_dev->dev.dma_parms = &sdev->dma_parms; - dma_set_max_seg_size(&base_dev->dev, UINT_MAX); - dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32); - if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask)) - goto error; - base_dev->num_comp_vectors = num_possible_cpus(); xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); @@ -430,7 +406,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev) atomic_set(&sdev->num_mr, 0); atomic_set(&sdev->num_pd, 0); - sdev->numa_node = dev_to_node(parent); + sdev->numa_node = dev_to_node(&netdev->dev); spin_lock_init(&sdev->lock); return sdev; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index f74982dcbea0dd..6b8e5bdd8526d9 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -537,6 +537,15 @@ static int verity_verify_io(struct dm_verity_io *io) return 0; } +/* + * Skip verity work in response to I/O error when system is shutting down. + */ +static inline bool verity_is_system_shutting_down(void) +{ + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF + || system_state == SYSTEM_RESTART; +} + /* * End one "io" structure with a given error. */ @@ -564,7 +573,8 @@ static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (bio->bi_status && !verity_fec_is_enabled(io->v)) { + if (bio->bi_status && + (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) { verity_finish_io(io, bio->bi_status); return; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3b598a3cb462af..9f9d8b67b5dd18 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1128,7 +1128,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, struct md_rdev *err_rdev = NULL; gfp_t gfp = GFP_NOIO; - if (r10_bio->devs[slot].rdev) { + if (slot >= 0 && r10_bio->devs[slot].rdev) { /* * This is an error retry, but we cannot * safely dereference the rdev in the r10_bio, @@ -1493,6 +1493,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) r10_bio->mddev = mddev; r10_bio->sector = bio->bi_iter.bi_sector; r10_bio->state = 0; + r10_bio->read_slot = -1; memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); if (bio_data_dir(bio) == READ) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 8c39528be39e53..7550721bd0bb01 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -858,7 +858,7 @@ static int s5p_mfc_open(struct file *file) * We'll do mostly sequential access, so sacrifice TLB efficiency for * faster allocation. 
*/ - q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES | DMA_ATTR_NON_CONSISTENT | DMA_ATTR_NO_KERNEL_MAPPING; + q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES | DMA_ATTR_NO_KERNEL_MAPPING; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; ret = vb2_queue_init(q); @@ -893,7 +893,7 @@ static int s5p_mfc_open(struct file *file) * We'll do mostly sequential access, so sacrifice TLB efficiency for * faster allocation. */ - q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES | DMA_ATTR_NON_CONSISTENT | DMA_ATTR_NO_KERNEL_MAPPING; + q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES | DMA_ATTR_NO_KERNEL_MAPPING; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; ret = vb2_queue_init(q); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 61e144a3520101..c60d90faf7e74c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -600,8 +600,8 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (ctx->state == MFCINST_ERROR) { - mfc_err("Call on QBUF after unrecoverable error\n"); - return -EIO; + //mfc_err("Call on QBUF after unrecoverable error\n"); + return -EAGAIN; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_qbuf(&ctx->vq_src, NULL, buf); @@ -620,8 +620,8 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) int ret; if (ctx->state == MFCINST_ERROR) { - mfc_err_limited("Call on DQBUF after unrecoverable error\n"); - return -EIO; + //mfc_err_limited("Call on DQBUF after unrecoverable error\n"); + return -EAGAIN; } switch (buf->type) { diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index c07f46f5176ea7..b4f661bb564816 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c @@ -182,7 +182,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) { - u8 status, buf; + u8 status = 0, buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); if (onoff) { diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 16695366ec926d..26ff49fdf0f7d3 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, return VMCI_ERROR_MORE_DATA; } - dbells = kmalloc(data_size, GFP_ATOMIC); + dbells = kzalloc(data_size, GFP_ATOMIC); if (!dbells) return VMCI_ERROR_NO_MEM; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 7900571fc85b32..c352217946455c 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -318,10 +318,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, buf += ret; } - if (req->ooblen) - memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, - req->ooblen); - return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 5934f71475477e..173ccf79cbfcc8 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); + if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) + req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; 
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index ae6620489457d6..5c1e7cb7fe0dee 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -414,7 +414,8 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) goto out_free_rsp; - r->req.p2p_client = &ndev->device->dev; + if (!ib_uses_virt_dma(ndev->device)) + r->req.p2p_client = &ndev->device->dev; r->send_sge.length = sizeof(*r->req.cqe); r->send_sge.lkey = ndev->pd->local_dma_lkey; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 0e0a5269dc82f8..903b465c8568b7 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1102,7 +1102,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) if (IS_ERR(opp_table->clk)) { ret = PTR_ERR(opp_table->clk); if (ret == -EPROBE_DEFER) - goto err; + goto remove_opp_dev; dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); } @@ -1111,7 +1111,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); if (ret) { if (ret == -EPROBE_DEFER) - goto err; + goto put_clk; dev_warn(dev, "%s: Error finding interconnect paths: %d\n", __func__, ret); @@ -1125,6 +1125,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) list_add(&opp_table->node, &opp_tables); return opp_table; +put_clk: + if (!IS_ERR(opp_table->clk)) + clk_put(opp_table->clk); +remove_opp_dev: + _remove_opp_dev(opp_dev, opp_table); err: kfree(opp_table); return ERR_PTR(ret); diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 4d9711d51f8f31..f0a6861ff3aef3 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -331,6 +331,37 @@ static const struct watchdog_ops pcf2127_watchdog_ops = { .set_timeout = pcf2127_wdt_set_timeout, }; +static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127) +{ + u32 wdd_timeout; + int ret; + + if (!IS_ENABLED(CONFIG_WATCHDOG) || + !device_property_read_bool(dev, "reset-source")) + return 0; + + pcf2127->wdd.parent = dev; + pcf2127->wdd.info = &pcf2127_wdt_info; + pcf2127->wdd.ops = &pcf2127_watchdog_ops; + pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN; + pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX; + pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT; + pcf2127->wdd.min_hw_heartbeat_ms = 500; + pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS; + + watchdog_set_drvdata(&pcf2127->wdd, pcf2127); + + /* Test if watchdog timer is started by bootloader */ + ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout); + if (ret) + return ret; + + if (wdd_timeout) + set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status); + + return devm_watchdog_register_device(dev, &pcf2127->wdd); +} + /* Alarm */ static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { @@ -532,7 +563,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, int alarm_irq, const char *name, bool has_nvmem) { struct pcf2127 *pcf2127; - u32 wdd_timeout; int ret = 0; dev_dbg(dev, "%s\n", __func__); @@ -571,17 +601,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops; } - pcf2127->wdd.parent = dev; - pcf2127->wdd.info = &pcf2127_wdt_info; - pcf2127->wdd.ops = &pcf2127_watchdog_ops; - pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN; 
- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX; - pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT; - pcf2127->wdd.min_hw_heartbeat_ms = 500; - pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS; - - watchdog_set_drvdata(&pcf2127->wdd, pcf2127); - if (has_nvmem) { struct nvmem_config nvmem_cfg = { .priv = pcf2127, @@ -611,19 +630,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, return ret; } - /* Test if watchdog timer is started by bootloader */ - ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout); - if (ret) - return ret; - - if (wdd_timeout) - set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status); - -#ifdef CONFIG_WATCHDOG - ret = devm_watchdog_register_device(dev, &pcf2127->wdd); - if (ret) - return ret; -#endif /* CONFIG_WATCHDOG */ + pcf2127_watchdog_init(dev, pcf2127); /* * Disable battery low/switch-over timestamp and interrupts. diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index c6b89273feba81..d4b2ab7861266e 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -361,8 +361,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) device_init_wakeup(&adev->dev, true); ldata->rtc = devm_rtc_allocate_device(&adev->dev); - if (IS_ERR(ldata->rtc)) - return PTR_ERR(ldata->rtc); + if (IS_ERR(ldata->rtc)) { + ret = PTR_ERR(ldata->rtc); + goto out; + } ldata->rtc->ops = ops; ldata->rtc->range_min = vendor->range_min; diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index e2b8b150bcb448..f2818cdd11d82e 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, 300000000); if (IS_ERR(rtc->int_osc)) { pr_crit("Couldn't register the internal oscillator\n"); - return; + goto err; } parents[0] = clk_hw_get_name(rtc->int_osc); @@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, rtc->losc = clk_register(NULL, &rtc->hw); if (IS_ERR(rtc->losc)) { pr_crit("Couldn't register the LOSC clock\n"); - return; + goto err_register; } of_property_read_string_index(node, "clock-output-names", 1, @@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, &rtc->lock); if (IS_ERR(rtc->ext_losc)) { pr_crit("Couldn't register the LOSC external gate\n"); - return; + goto err_register; } clk_data->num = 2; @@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); return; +err_register: + clk_hw_unregister_fixed_rate(rtc->int_osc); err: kfree(clk_data); } diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index b206e266b4e726..8b0deece9758b8 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI depends on PCI && INET && (IPV6 || IPV6=n) depends on THERMAL || !THERMAL depends on ETHERNET + depends on TLS || TLS=n select NET_VENDOR_CHELSIO select CHELSIO_T4 select CHELSIO_LIB diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 8df73bc2f8cb24..914a827a93ee8c 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -743,7 +743,7 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, return ret; } -static void ufs_mtk_device_reset(struct ufs_hba *hba) +static int ufs_mtk_device_reset(struct ufs_hba *hba) { struct arm_smccc_res res; @@ -764,6 +764,8 @@ static void ufs_mtk_device_reset(struct ufs_hba *hba) 
usleep_range(10000, 15000); dev_info(hba->dev, "device reset done\n"); + + return 0; } static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index f9d6ef3565407c..a244c8ae1b4eb3 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1421,13 +1421,13 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) * * Toggles the (optional) reset line to reset the attached device. */ -static void ufs_qcom_device_reset(struct ufs_hba *hba) +static int ufs_qcom_device_reset(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); /* reset gpio is optional */ if (!host->device_reset) - return; + return -EOPNOTSUPP; /* * The UFS device shall detect reset pulses of 1us, sleep for 10us to @@ -1438,6 +1438,8 @@ static void ufs_qcom_device_reset(struct ufs_hba *hba) gpiod_set_value_cansleep(host->device_reset, 0); usleep_range(10, 15); + + return 0; } #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index e0f00a42371c52..cd51553e522da8 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -318,7 +318,7 @@ struct ufs_hba_variant_ops { int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); int (*phy_initialization)(struct ufs_hba *); - void (*device_reset)(struct ufs_hba *hba); + int (*device_reset)(struct ufs_hba *hba); void (*config_scaling_param)(struct ufs_hba *hba, struct devfreq_dev_profile *profile, void *data); @@ -1181,9 +1181,17 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) { if (hba->vops && hba->vops->device_reset) { - hba->vops->device_reset(hba); - ufshcd_set_ufs_dev_active(hba); - ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); + int err = hba->vops->device_reset(hba); + + if (!err) { + ufshcd_set_ufs_dev_active(hba); + if (ufshcd_is_wb_allowed(hba)) { + hba->wb_enabled = false; + hba->wb_buf_flush_enabled = false; + } + } + if (err != -EOPNOTSUPP) + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, err); } } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3fd16b7f615073..aadaea052f51d3 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -256,6 +256,7 @@ config SPI_DW_BT1 tristate "Baikal-T1 SPI driver for DW SPI core" depends on MIPS_BAIKAL_T1 || COMPILE_TEST select MULTIPLEXER + select MUX_MMIO help Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI controllers. Two of them are pretty much normal: with IRQ, DMA, @@ -269,8 +270,6 @@ config SPI_DW_BT1 config SPI_DW_BT1_DIRMAP bool "Directly mapped Baikal-T1 Boot SPI flash support" depends on SPI_DW_BT1 - select MULTIPLEXER - select MUX_MMIO help Directly mapped SPI flash memory is an interface specific to the Baikal-T1 System Boot Controller. 
It is a 16MB MMIO region, which diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 427a993c7f576c..c844d513537e27 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -19,6 +19,8 @@ source "drivers/gpu/ipu-v3/Kconfig" source "drivers/gpu/drm/Kconfig" +source "drivers/gpu/arm/Kconfig" + menu "Frame buffer Devices" source "drivers/video/fbdev/Kconfig" endmenu diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index cef437817b0dc6..8d1ae973041aeb 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init) struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256; - int cap, ret; + int ret; if (WARN_ON(info_idx == -1)) return; @@ -1042,7 +1042,6 @@ static void fbcon_init(struct vc_data *vc, int init) con2fb_map[vc->vc_num] = info_idx; info = registered_fb[con2fb_map[vc->vc_num]]; - cap = info->flags; if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1147,11 +1146,13 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; - if ((cap & FBINFO_HWACCEL_COPYAREA) && - !(cap & FBINFO_HWACCEL_DISABLED)) - p->scrollmode = SCROLL_MOVE; - else /* default to something safe */ - p->scrollmode = SCROLL_REDRAW; + /* + * No more hw acceleration for fbcon. + * + * FIXME: Garbage collect all the now dead code after sufficient time + * has passed. + */ + p->scrollmode = SCROLL_REDRAW; /* * ++guenther: console.c:vc_allocate() relies on initializing @@ -1961,45 +1962,15 @@ static void updatescrollmode(struct fbcon_display *p, { struct fbcon_ops *ops = info->fbcon_par; int fh = vc->vc_font.height; - int cap = info->flags; - u16 t = 0; - int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, - info->fix.xpanstep); - int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, info->var.xres_virtual); - int good_pan = (cap & FBINFO_HWACCEL_YPAN) && - divides(ypan, vc->vc_font.height) && vyres > yres; - int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && - divides(ywrap, vc->vc_font.height) && - divides(vc->vc_font.height, vyres) && - divides(vc->vc_font.height, yres); - int reading_fast = cap & FBINFO_READS_FAST; - int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && - !(cap & FBINFO_HWACCEL_DISABLED); - int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && - !(cap & FBINFO_HWACCEL_DISABLED); p->vrows = vyres/fh; if (yres > (fh * (vc->vc_rows + 1))) p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; - - if (good_wrap || good_pan) { - if (reading_fast || fast_copyarea) - p->scrollmode = good_wrap ? - SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; - else - p->scrollmode = good_wrap ? 
SCROLL_REDRAW : - SCROLL_PAN_REDRAW; - } else { - if (reading_fast || (fast_copyarea && !fast_imageblit)) - p->scrollmode = SCROLL_MOVE; - else - p->scrollmode = SCROLL_REDRAW; - } } #define PITCH(w) (((w) + 7) >> 3) diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 836319cbaca9d7..359302f71f7efe 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret) + if (ret) { + pm_runtime_put_noidle(dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); + } platform_set_drvdata(pdev, wdt); diff --git a/firmware/edid/480x272.bin b/firmware/edid/480x272.bin new file mode 100644 index 00000000000000..a1cb28e133d990 Binary files /dev/null and b/firmware/edid/480x272.bin differ diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 3ac7611ef7ce22..fd691e4815c564 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -350,7 +350,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1; if (info->si_lasti == BFS_MAX_LASTI) - printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); + printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); else if (info->si_lasti > BFS_MAX_LASTI) { printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id); goto out1; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 526faf4778ce43..2462a9a84b9567 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1335,6 +1335,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) in, ceph_vinop(in)); if (in->i_state & I_NEW) discard_new_inode(in); + else + iput(in); goto done; } req->r_target_inode = in; diff --git a/fs/exec.c b/fs/exec.c index 547a2390baf54f..ca89e0e3ef10f1 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -965,8 +965,8 @@ EXPORT_SYMBOL(read_code); /* * Maps the mm_struct mm into the current task struct. - * On success, this function returns with the mutex - * exec_update_mutex locked. + * On success, this function returns with exec_update_lock + * held for writing. 
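+ * The lock is dropped again either at the end of setup_new_exec() or on the
+ * error path of begin_new_exec().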
*/ static int exec_mmap(struct mm_struct *mm) { @@ -981,7 +981,7 @@ static int exec_mmap(struct mm_struct *mm) if (old_mm) sync_mm_rss(old_mm); - ret = mutex_lock_killable(&tsk->signal->exec_update_mutex); + ret = down_write_killable(&tsk->signal->exec_update_lock); if (ret) return ret; @@ -995,7 +995,7 @@ static int exec_mmap(struct mm_struct *mm) mmap_read_lock(old_mm); if (unlikely(old_mm->core_state)) { mmap_read_unlock(old_mm); - mutex_unlock(&tsk->signal->exec_update_mutex); + up_write(&tsk->signal->exec_update_lock); return -EINTR; } } @@ -1382,7 +1382,7 @@ int begin_new_exec(struct linux_binprm * bprm) return 0; out_unlock: - mutex_unlock(&me->signal->exec_update_mutex); + up_write(&me->signal->exec_update_lock); out: return retval; } @@ -1423,7 +1423,7 @@ void setup_new_exec(struct linux_binprm * bprm) * some architectures like powerpc */ me->mm->task_size = TASK_SIZE; - mutex_unlock(&me->signal->exec_update_mutex); + up_write(&me->signal->exec_update_lock); mutex_unlock(&me->signal->cred_guard_mutex); } EXPORT_SYMBOL(setup_new_exec); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 37a619bf1ac7ca..e67d5de6f28ca6 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2395,9 +2395,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) nr = sbi->s_mb_prefetch; if (ext4_has_feature_flex_bg(sb)) { - nr = (group / sbi->s_mb_prefetch) * - sbi->s_mb_prefetch; - nr = nr + sbi->s_mb_prefetch - group; + nr = 1 << sbi->s_log_groups_per_flex; + nr -= group & (nr - 1); + nr = min(nr, sbi->s_mb_prefetch); } prefetch_grp = ext4_mb_prefetch(sb, group, nr, &prefetch_ios); @@ -2733,7 +2733,8 @@ static int ext4_mb_init_backend(struct super_block *sb) if (ext4_has_feature_flex_bg(sb)) { /* a single flex group is supposed to be read by a single IO */ - sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex; + sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex, + BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ } else { sbi->s_mb_prefetch = 32; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2b08b162075c3d..ea5aefa23a20a6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4186,18 +4186,25 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); - - if (blocksize == PAGE_SIZE) - set_opt(sb, DIOREAD_NOLOCK); - - if (blocksize < EXT4_MIN_BLOCK_SIZE || - blocksize > EXT4_MAX_BLOCK_SIZE) { + if (le32_to_cpu(es->s_log_block_size) > + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ext4_msg(sb, KERN_ERR, - "Unsupported filesystem blocksize %d (%d log_block_size)", - blocksize, le32_to_cpu(es->s_log_block_size)); + "Invalid log block size: %u", + le32_to_cpu(es->s_log_block_size)); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } + + blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); + + if (blocksize == PAGE_SIZE) + set_opt(sb, DIOREAD_NOLOCK); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; @@ -4416,21 +4423,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) goto failed_mount; - if 
(le32_to_cpu(es->s_log_block_size) > - (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { - ext4_msg(sb, KERN_ERR, - "Invalid log block size: %u", - le32_to_cpu(es->s_log_block_size)); - goto failed_mount; - } - if (le32_to_cpu(es->s_log_cluster_size) > - (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { - ext4_msg(sb, KERN_ERR, - "Invalid log cluster size: %u", - le32_to_cpu(es->s_log_cluster_size)); - goto failed_mount; - } - if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { ext4_msg(sb, KERN_ERR, "Number of reserved GDT blocks insanely large: %d", diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 023462e80e58d5..b39bf416d51148 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1600,7 +1600,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) goto out; } - if (NM_I(sbi)->dirty_nat_cnt == 0 && + if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && SIT_I(sbi)->dirty_sentries == 0 && prefree_segments(sbi) == 0) { f2fs_flush_sit_entries(sbi, cpc); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 14262e0f1cd60e..c5fee4d7ea72fe 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -798,8 +798,6 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) if (cops->destroy_decompress_ctx) cops->destroy_decompress_ctx(dic); out_free_dic: - if (verity) - atomic_set(&dic->pending_pages, dic->nr_cpages); if (!verity) f2fs_decompress_end_io(dic->rpages, dic->cluster_size, ret, false); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index be4da52604edc1..b29243ee1c3e54 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -202,7 +202,7 @@ static void f2fs_verify_bio(struct bio *bio) dic = (struct decompress_io_ctx *)page_private(page); if (dic) { - if (atomic_dec_return(&dic->pending_pages)) + if (atomic_dec_return(&dic->verity_pages)) continue; f2fs_verify_pages(dic->rpages, dic->cluster_size); @@ -1027,7 +1027,8 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, unsigned nr_pages, unsigned op_flag, - pgoff_t first_idx, bool for_write) + pgoff_t first_idx, bool for_write, + bool for_verity) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio; @@ -1049,7 +1050,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ; - if (f2fs_need_verity(inode, first_idx)) + if (for_verity && f2fs_need_verity(inode, first_idx)) post_read_steps |= 1 << STEP_VERITY; if (post_read_steps) { @@ -1079,7 +1080,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, struct bio *bio; bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags, - page->index, for_write); + page->index, for_write, true); if (IS_ERR(bio)) return PTR_ERR(bio); @@ -2133,7 +2134,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page, if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, is_readahead ? 
REQ_RAHEAD : 0, page->index, - false); + false, true); if (IS_ERR(bio)) { ret = PTR_ERR(bio); bio = NULL; @@ -2180,6 +2181,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, const unsigned blkbits = inode->i_blkbits; const unsigned blocksize = 1 << blkbits; struct decompress_io_ctx *dic = NULL; + struct bio_post_read_ctx *ctx; + bool for_verity = false; int i; int ret = 0; @@ -2245,10 +2248,29 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, goto out_put_dnode; } + /* + * It's possible to enable fsverity on the fly when handling a cluster, + * which requires complicated error handling. Instead of adding more + * complexity, let's give a rule where end_io post-processes fsverity + * per cluster. In order to do that, we need to submit bio, if previous + * bio sets a different post-process policy. + */ + if (fsverity_active(cc->inode)) { + atomic_set(&dic->verity_pages, cc->nr_cpages); + for_verity = true; + + if (bio) { + ctx = bio->bi_private; + if (!(ctx->enabled_steps & (1 << STEP_VERITY))) { + __submit_bio(sbi, bio, DATA); + bio = NULL; + } + } + } + for (i = 0; i < dic->nr_cpages; i++) { struct page *page = dic->cpages[i]; block_t blkaddr; - struct bio_post_read_ctx *ctx; blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); @@ -2264,17 +2286,31 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, if (!bio) { bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, is_readahead ? REQ_RAHEAD : 0, - page->index, for_write); + page->index, for_write, for_verity); if (IS_ERR(bio)) { + unsigned int remained = dic->nr_cpages - i; + bool release = false; + ret = PTR_ERR(bio); dic->failed = true; - if (!atomic_sub_return(dic->nr_cpages - i, - &dic->pending_pages)) { + + if (for_verity) { + if (!atomic_sub_return(remained, + &dic->verity_pages)) + release = true; + } else { + if (!atomic_sub_return(remained, + &dic->pending_pages)) + release = true; + } + + if (release) { f2fs_decompress_end_io(dic->rpages, - cc->cluster_size, true, - false); + cc->cluster_size, true, + false); f2fs_free_dic(dic); } + f2fs_put_dnode(&dn); *bio_ret = NULL; return ret; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a8357fd4f5fab9..197c914119da8e 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -145,8 +145,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->node_pages = NODE_MAPPING(sbi)->nrpages; if (sbi->meta_inode) si->meta_pages = META_MAPPING(sbi)->nrpages; - si->nats = NM_I(sbi)->nat_cnt; - si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; + si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT]; + si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT]; si->sits = MAIN_SEGS(sbi); si->dirty_sits = SIT_I(sbi)->dirty_sentries; si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; @@ -278,9 +278,10 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + NM_I(sbi)->nid_cnt[PREALLOC_NID]) * sizeof(struct free_nid); - si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); - si->cache_mem += NM_I(sbi)->dirty_nat_cnt * - sizeof(struct nat_entry_set); + si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry); + si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] * + sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9a321c52facec1..06e5a6053f3f9b 100644 --- a/fs/f2fs/f2fs.h +++ 
b/fs/f2fs/f2fs.h @@ -894,6 +894,13 @@ enum nid_state { MAX_NID_STATE, }; +enum nat_state { + TOTAL_NAT, + DIRTY_NAT, + RECLAIMABLE_NAT, + MAX_NAT_STATE, +}; + struct f2fs_nm_info { block_t nat_blkaddr; /* base disk address of NAT */ nid_t max_nid; /* maximum possible node ids */ @@ -909,8 +916,7 @@ struct f2fs_nm_info { struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ - unsigned int nat_cnt; /* the # of cached nat entries */ - unsigned int dirty_nat_cnt; /* total num of nat entries in set */ + unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ unsigned int nat_blocks; /* # of nat blocks */ /* free node ids management */ @@ -1404,6 +1410,7 @@ struct decompress_io_ctx { size_t rlen; /* valid data length in rbuf */ size_t clen; /* valid data length in cbuf */ atomic_t pending_pages; /* in-flight compressed page count */ + atomic_t verity_pages; /* in-flight page count for verity */ bool failed; /* indicate IO error during decompression */ void *private; /* payload buffer for specified decompression algorithm */ void *private2; /* extra payload buffer */ diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 42394de6c7eb1d..e65d73293a3f63 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { - mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> - PAGE_SHIFT; + mem_size = (nm_i->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); if (excess_cached_nats(sbi)) res = false; @@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, list_add_tail(&ne->list, &nm_i->nat_entries); spin_unlock(&nm_i->nat_list_lock); - nm_i->nat_cnt++; + nm_i->nat_cnt[TOTAL_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; return ne; } @@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) { radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); - nm_i->nat_cnt--; + nm_i->nat_cnt[TOTAL_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; __free_nat_entry(e); } @@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, if (get_nat_flag(ne, IS_DIRTY)) goto refresh_list; - nm_i->dirty_nat_cnt++; + nm_i->nat_cnt[DIRTY_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; set_nat_flag(ne, IS_DIRTY, true); refresh_list: spin_lock(&nm_i->nat_list_lock); @@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, set_nat_flag(ne, IS_DIRTY, false); set->entry_cnt--; - nm_i->dirty_nat_cnt--; + nm_i->nat_cnt[DIRTY_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; } static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, @@ -2944,14 +2948,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) LIST_HEAD(sets); int err = 0; - /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */ + /* + * during unmount, let's flush nat_bits before checking + * nat_cnt[DIRTY_NAT]. 
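+ * remove_nats_in_journal() moves journalled entries into the dirty NAT set,
+ * so the early-exit check below must run after that flush.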
+ */ if (enabled_nat_bits(sbi, cpc)) { down_write(&nm_i->nat_tree_lock); remove_nats_in_journal(sbi); up_write(&nm_i->nat_tree_lock); } - if (!nm_i->dirty_nat_cnt) + if (!nm_i->nat_cnt[DIRTY_NAT]) return 0; down_write(&nm_i->nat_tree_lock); @@ -2962,7 +2969,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * into nat entry set. */ if (enabled_nat_bits(sbi, cpc) || - !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) + !__has_cursum_space(journal, + nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, @@ -3086,7 +3094,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi) F2FS_RESERVED_NODE_NUM; nm_i->nid_cnt[FREE_NID] = 0; nm_i->nid_cnt[PREALLOC_NID] = 0; - nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; @@ -3220,7 +3227,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) __del_from_nat_cache(nm_i, natvec[idx]); } } - f2fs_bug_on(sbi, nm_i->nat_cnt); + f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); /* destroy nat set cache */ nid = 0; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 69e5859e993cf7..f84541b57acbbc 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -126,13 +126,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne, static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid * + return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid * NM_I(sbi)->dirty_nats_ratio / 100; } static inline bool excess_cached_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD; + return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD; } static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index d66de5999a26d8..dd3c3c7a90ec8f 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -18,9 +18,7 @@ static unsigned int shrinker_run_no; static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) { - long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; - - return count > 0 ? 
count : 0; + return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT]; } static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index fef22e476c526a..aa284ce7ec00df 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2744,7 +2744,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, block_t total_sections, blocks_per_seg; struct f2fs_super_block *raw_super = (struct f2fs_super_block *) (bh->b_data + F2FS_SUPER_OFFSET); - unsigned int blocksize; size_t crc_offset = 0; __u32 crc = 0; @@ -2778,10 +2777,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, } /* Currently, support only 4KB block size */ - blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); - if (blocksize != F2FS_BLKSIZE) { - f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", - blocksize); + if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) { + f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", + le32_to_cpu(raw_super->log_blocksize), + F2FS_BLKSIZE_BITS); return -EFSCORRUPTED; } diff --git a/fs/fcntl.c b/fs/fcntl.c index 19ac5baad50fdc..05b36b28f2e87f 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -781,9 +781,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band) { struct task_struct *p; enum pid_type type; + unsigned long flags; struct pid *pid; - read_lock(&fown->lock); + read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; @@ -804,7 +805,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band) read_unlock(&tasklist_lock); } out_unlock_fown: - read_unlock(&fown->lock); + read_unlock_irqrestore(&fown->lock, flags); } static void send_sigurg_to_task(struct task_struct *p, @@ -819,9 +820,10 @@ int send_sigurg(struct fown_struct *fown) struct task_struct *p; enum pid_type type; struct pid *pid; + unsigned long flags; int ret = 0; - read_lock(&fown->lock); + read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; @@ -844,7 +846,7 @@ int send_sigurg(struct fown_struct *fown) read_unlock(&tasklist_lock); } out_unlock_fown: - read_unlock(&fown->lock); + read_unlock_irqrestore(&fown->lock, flags); return ret; } diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c index 5a48cee6d7d333..f529075a2ce878 100644 --- a/fs/fuse/acl.c +++ b/fs/fuse/acl.c @@ -19,6 +19,9 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type) void *value = NULL; struct posix_acl *acl; + if (fuse_is_bad(inode)) + return ERR_PTR(-EIO); + if (!fc->posix_acl || fc->no_getxattr) return NULL; @@ -53,6 +56,9 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type) const char *name; int ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fc->posix_acl || fc->no_setxattr) return -EOPNOTSUPP; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index ff7dbeb16f88da..ffa031fe52933f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -202,7 +202,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) int ret; inode = d_inode_rcu(entry); - if (inode && is_bad_inode(inode)) + if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || (flags & LOOKUP_REVAL)) { @@ -463,6 +463,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, bool outarg_valid = true; bool locked; + if (fuse_is_bad(dir)) + return ERR_PTR(-EIO); + locked = fuse_lock_inode(dir); err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, &outarg, &inode); @@ -606,6 +609,9 @@ static int fuse_atomic_open(struct inode *dir, 
struct dentry *entry, struct fuse_conn *fc = get_fuse_conn(dir); struct dentry *res = NULL; + if (fuse_is_bad(dir)) + return -EIO; + if (d_in_lookup(entry)) { res = fuse_lookup(dir, entry, 0); if (IS_ERR(res)) @@ -654,6 +660,9 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args, int err; struct fuse_forget_link *forget; + if (fuse_is_bad(dir)) + return -EIO; + forget = fuse_alloc_forget(); if (!forget) return -ENOMEM; @@ -781,6 +790,9 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); + if (fuse_is_bad(dir)) + return -EIO; + args.opcode = FUSE_UNLINK; args.nodeid = get_node_id(dir); args.in_numargs = 1; @@ -817,6 +829,9 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); + if (fuse_is_bad(dir)) + return -EIO; + args.opcode = FUSE_RMDIR; args.nodeid = get_node_id(dir); args.in_numargs = 1; @@ -895,6 +910,9 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent, struct fuse_conn *fc = get_fuse_conn(olddir); int err; + if (fuse_is_bad(olddir)) + return -EIO; + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; @@ -1030,7 +1048,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, if (!err) { if (fuse_invalid_attr(&outarg.attr) || (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { - make_bad_inode(inode); + fuse_make_bad(inode); err = -EIO; } else { fuse_change_attributes(inode, &outarg.attr, @@ -1232,6 +1250,9 @@ static int fuse_permission(struct inode *inode, int mask) bool refreshed = false; int err = 0; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fc)) return -EACCES; @@ -1327,7 +1348,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out_err; if (fc->cache_symlinks) @@ -1375,7 +1396,7 @@ static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, struct fuse_conn *fc = get_fuse_conn(inode); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; if (fc->no_fsyncdir) @@ -1664,7 +1685,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, if (fuse_invalid_attr(&outarg.attr) || (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { - make_bad_inode(inode); + fuse_make_bad(inode); err = -EIO; goto error; } @@ -1727,6 +1748,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) struct file *file = (attr->ia_valid & ATTR_FILE) ? 
attr->ia_file : NULL; int ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(get_fuse_conn(inode))) return -EACCES; @@ -1785,6 +1809,9 @@ static int fuse_getattr(const struct path *path, struct kstat *stat, struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fc)) { if (!request_mask) { /* diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c03034e8c1529c..8b306005453ccf 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -226,6 +226,9 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) bool dax_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc && FUSE_IS_DAX(inode); + if (fuse_is_bad(inode)) + return -EIO; + err = generic_file_open(inode, file); if (err) return err; @@ -463,7 +466,7 @@ static int fuse_flush(struct file *file, fl_owner_t id) FUSE_ARGS(args); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; err = write_inode_now(inode, 1); @@ -535,7 +538,7 @@ static int fuse_fsync(struct file *file, loff_t start, loff_t end, struct fuse_conn *fc = get_fuse_conn(inode); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; inode_lock(inode); @@ -859,7 +862,7 @@ static int fuse_readpage(struct file *file, struct page *page) int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out; err = fuse_do_readpage(file, page); @@ -952,7 +955,7 @@ static void fuse_readahead(struct readahead_control *rac) struct fuse_conn *fc = get_fuse_conn(inode); unsigned int i, max_pages, nr_pages = 0; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return; max_pages = min_t(unsigned int, fc->max_pages, @@ -1555,7 +1558,7 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(file); - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; if (FUSE_IS_DAX(inode)) @@ -1573,7 +1576,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(file); - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; if (FUSE_IS_DAX(inode)) @@ -2172,7 +2175,7 @@ static int fuse_writepages(struct address_space *mapping, int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out; data.inode = inode; @@ -2954,7 +2957,7 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd, if (!fuse_allow_current_process(fc)) return -EACCES; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; return fuse_do_ioctl(file, cmd, arg, flags); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index d51598017d1330..404d66f01e8d7b 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -172,6 +172,8 @@ enum { FUSE_I_INIT_RDPLUS, /** An operation changing file size is in progress */ FUSE_I_SIZE_UNSTABLE, + /* Bad inode */ + FUSE_I_BAD, }; struct fuse_conn; @@ -858,6 +860,16 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc) return atomic64_read(&fc->attr_version); } +static inline void fuse_make_bad(struct inode *inode) +{ + set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); +} + +static inline bool fuse_is_bad(struct inode *inode) +{ + return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state)); +} + /** Device operations */ extern const struct file_operations fuse_dev_operations; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 
1a47afc95f8000..f94b0bb57619ca 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -132,7 +132,7 @@ static void fuse_evict_inode(struct inode *inode) fi->forget = NULL; } } - if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) { + if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) { WARN_ON(!list_empty(&fi->write_files)); WARN_ON(!list_empty(&fi->queued_writes)); } @@ -342,7 +342,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { /* Inode has changed type, any I/O on the old should fail */ - make_bad_inode(inode); + fuse_make_bad(inode); iput(inode); goto retry; } diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 3b5e91045871a1..3441ffa740f3d7 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -207,7 +207,7 @@ static int fuse_direntplus_link(struct file *file, dput(dentry); goto retry; } - if (is_bad_inode(inode)) { + if (fuse_is_bad(inode)) { dput(dentry); return -EIO; } @@ -568,7 +568,7 @@ int fuse_readdir(struct file *file, struct dir_context *ctx) struct inode *inode = file_inode(file); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; mutex_lock(&ff->readdir.lock); diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 371bdcbc723372..cdea18de94f7e8 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -113,6 +113,9 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) struct fuse_getxattr_out outarg; ssize_t ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fm->fc)) return -EACCES; @@ -178,6 +181,9 @@ static int fuse_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *value, size_t size) { + if (fuse_is_bad(inode)) + return -EIO; + return fuse_getxattr(inode, name, value, size); } @@ -186,6 +192,9 @@ static int fuse_xattr_set(const struct xattr_handler *handler, const char *name, const void *value, size_t size, int flags) { + if (fuse_is_bad(inode)) + return -EIO; + if (!value) return fuse_removexattr(inode, name); diff --git a/fs/io_uring.c b/fs/io_uring.c index 0fcd065baa7606..1f798c5c4213ec 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -941,6 +941,10 @@ enum io_mem_account { ACCT_PINNED, }; +static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node); +static struct fixed_file_ref_node *alloc_fixed_file_ref_node( + struct io_ring_ctx *ctx); + static void __io_complete_rw(struct io_kiocb *req, long res, long res2, struct io_comp_state *cs); static void io_cqring_fill_event(struct io_kiocb *req, long res); @@ -1369,6 +1373,13 @@ static bool io_grab_identity(struct io_kiocb *req) spin_unlock_irq(&ctx->inflight_lock); req->work.flags |= IO_WQ_WORK_FILES; } + if (!(req->work.flags & IO_WQ_WORK_MM) && + (def->work_flags & IO_WQ_WORK_MM)) { + if (id->mm != current->mm) + return false; + mmgrab(id->mm); + req->work.flags |= IO_WQ_WORK_MM; + } return true; } @@ -1393,13 +1404,6 @@ static void io_prep_async_work(struct io_kiocb *req) req->work.flags |= IO_WQ_WORK_UNBOUND; } - /* ->mm can never change on us */ - if (!(req->work.flags & IO_WQ_WORK_MM) && - (def->work_flags & IO_WQ_WORK_MM)) { - mmgrab(id->mm); - req->work.flags |= IO_WQ_WORK_MM; - } - /* if we fail grabbing identity, we must COW, regrab, and retry */ if (io_grab_identity(req)) return; @@ -1632,8 +1636,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, LIST_HEAD(list); if (!force) { - if (list_empty_careful(&ctx->cq_overflow_list)) - return 
true; if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)) return false; @@ -5861,15 +5863,15 @@ static void io_req_drop_files(struct io_kiocb *req) struct io_ring_ctx *ctx = req->ctx; unsigned long flags; + put_files_struct(req->work.identity->files); + put_nsproxy(req->work.identity->nsproxy); spin_lock_irqsave(&ctx->inflight_lock, flags); list_del(&req->inflight_entry); - if (waitqueue_active(&ctx->inflight_wait)) - wake_up(&ctx->inflight_wait); spin_unlock_irqrestore(&ctx->inflight_lock, flags); req->flags &= ~REQ_F_INFLIGHT; - put_files_struct(req->work.identity->files); - put_nsproxy(req->work.identity->nsproxy); req->work.flags &= ~IO_WQ_WORK_FILES; + if (waitqueue_active(&ctx->inflight_wait)) + wake_up(&ctx->inflight_wait); } static void __io_clean_op(struct io_kiocb *req) @@ -6575,8 +6577,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) /* if we have a backlog and couldn't flush it all, return BUSY */ if (test_bit(0, &ctx->sq_check_overflow)) { - if (!list_empty(&ctx->cq_overflow_list) && - !io_cqring_overflow_flush(ctx, false, NULL, NULL)) + if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) return -EBUSY; } @@ -6798,8 +6799,16 @@ static int io_sq_thread(void *data) * kthread parking. This synchronizes the thread vs users, * the users are synchronized on the sqd->ctx_lock. */ - if (kthread_should_park()) + if (kthread_should_park()) { kthread_parkme(); + /* + * When sq thread is unparked, in case the previous park operation + * comes from io_put_sq_data(), which means that sq thread is going + * to be stopped, so here needs to have a check. + */ + if (kthread_should_stop()) + break; + } if (unlikely(!list_empty(&sqd->ctx_new_list))) io_sqd_init_new(sqd); @@ -6991,18 +7000,32 @@ static void io_file_ref_kill(struct percpu_ref *ref) complete(&data->done); } +static void io_sqe_files_set_node(struct fixed_file_data *file_data, + struct fixed_file_ref_node *ref_node) +{ + spin_lock_bh(&file_data->lock); + file_data->node = ref_node; + list_add_tail(&ref_node->node, &file_data->ref_list); + spin_unlock_bh(&file_data->lock); + percpu_ref_get(&file_data->refs); +} + static int io_sqe_files_unregister(struct io_ring_ctx *ctx) { struct fixed_file_data *data = ctx->file_data; - struct fixed_file_ref_node *ref_node = NULL; + struct fixed_file_ref_node *backup_node, *ref_node = NULL; unsigned nr_tables, i; + int ret; if (!data) return -ENXIO; + backup_node = alloc_fixed_file_ref_node(ctx); + if (!backup_node) + return -ENOMEM; - spin_lock(&data->lock); + spin_lock_bh(&data->lock); ref_node = data->node; - spin_unlock(&data->lock); + spin_unlock_bh(&data->lock); if (ref_node) percpu_ref_kill(&ref_node->refs); @@ -7010,7 +7033,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) /* wait for all refs nodes to complete */ flush_delayed_work(&ctx->file_put_work); - wait_for_completion(&data->done); + do { + ret = wait_for_completion_interruptible(&data->done); + if (!ret) + break; + ret = io_run_task_work_sig(); + if (ret < 0) { + percpu_ref_resurrect(&data->refs); + reinit_completion(&data->done); + io_sqe_files_set_node(data, backup_node); + return ret; + } + } while (1); __io_sqe_files_unregister(ctx); nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); @@ -7021,6 +7055,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) kfree(data); ctx->file_data = NULL; ctx->nr_user_files = 0; + destroy_fixed_file_ref_node(backup_node); return 0; } @@ -7385,7 +7420,7 @@ static void io_file_data_ref_zero(struct 
percpu_ref *ref) data = ref_node->file_data; ctx = data->ctx; - spin_lock(&data->lock); + spin_lock_bh(&data->lock); ref_node->done = true; while (!list_empty(&data->ref_list)) { @@ -7397,7 +7432,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref) list_del(&ref_node->node); first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist); } - spin_unlock(&data->lock); + spin_unlock_bh(&data->lock); if (percpu_ref_is_dying(&data->refs)) delay = 0; @@ -7519,11 +7554,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, return PTR_ERR(ref_node); } - file_data->node = ref_node; - spin_lock(&file_data->lock); - list_add_tail(&ref_node->node, &file_data->ref_list); - spin_unlock(&file_data->lock); - percpu_ref_get(&file_data->refs); + io_sqe_files_set_node(file_data, ref_node); return ret; out_fput: for (i = 0; i < ctx->nr_user_files; i++) { @@ -7679,11 +7710,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, if (needs_switch) { percpu_ref_kill(&data->node->refs); - spin_lock(&data->lock); - list_add_tail(&ref_node->node, &data->ref_list); - data->node = ref_node; - spin_unlock(&data->lock); - percpu_ref_get(&ctx->file_data->refs); + io_sqe_files_set_node(data, ref_node); } else destroy_fixed_file_ref_node(ref_node); diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index 778275f48a8795..5a7091746f68b3 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h @@ -38,6 +38,7 @@ struct jffs2_mount_opts { * users. This is implemented simply by means of not allowing the * latter users to write to the file system if the amount if the * available space is less then 'rp_size'. */ + bool set_rp_size; unsigned int rp_size; }; diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 4fd297bdf0f3ff..81ca58c10b728c 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root) if (opts->override_compr) seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); - if (opts->rp_size) + if (opts->set_rp_size) seq_printf(s, ",rp_size=%u", opts->rp_size / 1024); return 0; @@ -202,11 +202,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param) case Opt_rp_size: if (result.uint_32 > UINT_MAX / 1024) return invalf(fc, "jffs2: rp_size unrepresentable"); - opt = result.uint_32 * 1024; - if (opt > c->mtd->size) - return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB", - c->mtd->size / 1024); - c->mount_opts.rp_size = opt; + c->mount_opts.rp_size = result.uint_32 * 1024; + c->mount_opts.set_rp_size = true; break; default: return -EINVAL; @@ -225,8 +222,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc) c->mount_opts.override_compr = new_c->mount_opts.override_compr; c->mount_opts.compr = new_c->mount_opts.compr; } - if (new_c->mount_opts.rp_size) + if (new_c->mount_opts.set_rp_size) { + c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size; c->mount_opts.rp_size = new_c->mount_opts.rp_size; + } mutex_unlock(&c->alloc_sem); } @@ -266,6 +265,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc) c->mtd = sb->s_mtd; c->os_priv = sb; + if (c->mount_opts.rp_size > c->mtd->size) + return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB", + c->mtd->size / 1024); + /* Initialize JFFS2 superblock locks, the further initialization will * be done later */ mutex_init(&c->alloc_sem); diff --git a/fs/namespace.c b/fs/namespace.c index cebaa3e8179406..93006abe7946a1 100644 --- 
a/fs/namespace.c +++ b/fs/namespace.c @@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n) /* * vfsmount lock must be held for write */ -unsigned int mnt_get_count(struct mount *mnt) +int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP - unsigned int count = 0; + int count = 0; int cpu; for_each_possible_cpu(cpu) { @@ -1139,6 +1139,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); static void mntput_no_expire(struct mount *mnt) { LIST_HEAD(list); + int count; rcu_read_lock(); if (likely(READ_ONCE(mnt->mnt_ns))) { @@ -1162,7 +1163,9 @@ static void mntput_no_expire(struct mount *mnt) */ smp_mb(); mnt_add_count(mnt, -1); - if (mnt_get_count(mnt)) { + count = mnt_get_count(mnt); + if (count != 0) { + WARN_ON(count < 0); rcu_read_unlock(); unlock_mount_hash(); return; diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 8432bd6b95f08d..c078f88552695d 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -1019,29 +1019,24 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re return decode_op_hdr(xdr, OP_DEALLOCATE); } -static int decode_read_plus_data(struct xdr_stream *xdr, struct nfs_pgio_res *res, - uint32_t *eof) +static int decode_read_plus_data(struct xdr_stream *xdr, + struct nfs_pgio_res *res) { uint32_t count, recvd; uint64_t offset; __be32 *p; p = xdr_inline_decode(xdr, 8 + 4); - if (unlikely(!p)) - return -EIO; + if (!p) + return 1; p = xdr_decode_hyper(p, &offset); count = be32_to_cpup(p); recvd = xdr_align_data(xdr, res->count, count); res->count += recvd; - if (count > recvd) { - dprintk("NFS: server cheating in read reply: " - "count %u > recvd %u\n", count, recvd); - *eof = 0; + if (count > recvd) return 1; - } - return 0; } @@ -1052,18 +1047,16 @@ static int decode_read_plus_hole(struct xdr_stream *xdr, struct nfs_pgio_res *re __be32 *p; p = xdr_inline_decode(xdr, 8 + 8); - if (unlikely(!p)) - return -EIO; + if (!p) + return 1; p = xdr_decode_hyper(p, &offset); p = xdr_decode_hyper(p, &length); recvd = xdr_expand_hole(xdr, res->count, length); res->count += recvd; - if (recvd < length) { - *eof = 0; + if (recvd < length) return 1; - } return 0; } @@ -1088,12 +1081,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res) for (i = 0; i < segments; i++) { p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - return -EIO; + if (!p) + goto early_out; type = be32_to_cpup(p++); if (type == NFS4_CONTENT_DATA) - status = decode_read_plus_data(xdr, res, &eof); + status = decode_read_plus_data(xdr, res); else if (type == NFS4_CONTENT_HOLE) status = decode_read_plus_hole(xdr, res, &eof); else @@ -1102,12 +1095,17 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res) if (status < 0) return status; if (status > 0) - break; + goto early_out; } out: res->eof = eof; return 0; +early_out: + if (unlikely(!i)) + return -EIO; + res->eof = 0; + return 0; } static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res) diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 93f5c1678ec291..984cc42ee54d8c 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -67,7 +67,7 @@ static void nfs4_evict_inode(struct inode *inode) nfs_inode_evict_delegation(inode); /* Note that above delegreturn would trigger pnfs return-on-close */ pnfs_return_layout(inode); - pnfs_destroy_layout(NFS_I(inode)); + pnfs_destroy_layout_final(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); nfs4_xattr_cache_zap(inode); diff --git 
a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0e50b9d45c3209..07f59dc8cb2e77 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -294,6 +294,7 @@ void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) { struct inode *inode; + unsigned long i_state; if (!lo) return; @@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) if (!list_empty(&lo->plh_segs)) WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); pnfs_detach_layout_hdr(lo); + i_state = inode->i_state; spin_unlock(&inode->i_lock); pnfs_free_layout_hdr(lo); + /* Notify pnfs_destroy_layout_final() that we're done */ + if (i_state & (I_FREEING | I_CLEAR)) + wake_up_var(lo); } } @@ -734,8 +739,7 @@ pnfs_free_lseg_list(struct list_head *free_me) } } -void -pnfs_destroy_layout(struct nfs_inode *nfsi) +static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi) { struct pnfs_layout_hdr *lo; LIST_HEAD(tmp_list); @@ -753,9 +757,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi) pnfs_put_layout_hdr(lo); } else spin_unlock(&nfsi->vfs_inode.i_lock); + return lo; +} + +void pnfs_destroy_layout(struct nfs_inode *nfsi) +{ + __pnfs_destroy_layout(nfsi); } EXPORT_SYMBOL_GPL(pnfs_destroy_layout); +static bool pnfs_layout_removed(struct nfs_inode *nfsi, + struct pnfs_layout_hdr *lo) +{ + bool ret; + + spin_lock(&nfsi->vfs_inode.i_lock); + ret = nfsi->layout != lo; + spin_unlock(&nfsi->vfs_inode.i_lock); + return ret; +} + +void pnfs_destroy_layout_final(struct nfs_inode *nfsi) +{ + struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi); + + if (lo) + wait_var_event(lo, pnfs_layout_removed(nfsi, lo)); +} + static bool pnfs_layout_add_bulk_destroy_list(struct inode *inode, struct list_head *layout_list) diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 2661c44c62db40..78c38939184860 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -266,6 +266,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp); void pnfs_layoutget_free(struct nfs4_layoutget *lgp); void pnfs_free_lseg_list(struct list_head *tmp_list); void pnfs_destroy_layout(struct nfs_inode *); +void pnfs_destroy_layout_final(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); int pnfs_destroy_layouts_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid, @@ -710,6 +711,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi) { } +static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi) +{ +} + static inline struct pnfs_layout_segment * pnfs_get_lseg(struct pnfs_layout_segment *lseg) { diff --git a/fs/pnode.h b/fs/pnode.h index 49a058c73e4c77..26f74e092bd981 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int); void propagate_mount_unlock(struct mount *); void mnt_release_group_id(struct mount *); int get_dominating_id(struct mount *mnt, const struct path *root); -unsigned int mnt_get_count(struct mount *mnt); +int mnt_get_count(struct mount *mnt); void mnt_set_mountpoint(struct mount *, struct mountpoint *, struct mount *); void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, diff --git a/fs/proc/base.c b/fs/proc/base.c index b362523a9829ac..55ce0ee9c5c734 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -405,11 +405,11 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, static int lock_trace(struct task_struct *task) { - int err = mutex_lock_killable(&task->signal->exec_update_mutex); + int err = down_read_killable(&task->signal->exec_update_lock); if (err) return err; if (!ptrace_may_access(task, 
PTRACE_MODE_ATTACH_FSCREDS)) { - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); return -EPERM; } return 0; @@ -417,7 +417,7 @@ static int lock_trace(struct task_struct *task) static void unlock_trace(struct task_struct *task) { - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); } #ifdef CONFIG_STACKTRACE @@ -2930,7 +2930,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh unsigned long flags; int result; - result = mutex_lock_killable(&task->signal->exec_update_mutex); + result = down_read_killable(&task->signal->exec_update_lock); if (result) return result; @@ -2966,7 +2966,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh result = 0; out_unlock: - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); return result; } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index a6f856f341dc7b..c5562c871c8bef 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) memset(buf, 0, info->dqi_usable_bs); return sb->s_op->quota_read(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); } static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) @@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) ssize_t ret; ret = sb->s_op->quota_write(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); if (ret != info->dqi_usable_bs) { quota_error(sb, "dquota write failed"); if (ret >= 0) @@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk); goto out_buf; } - dquot->dq_off = (blk << info->dqi_blocksize_bits) + + dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; kfree(buf); @@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, ret = -EIO; goto out_buf; } else { - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 8bf88d690729e3..476a7ff494822f 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) "(second one): %h", ih); return 0; } + if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) { + reiserfs_warning(NULL, "reiserfs-5093", + "item entry count seems wrong %h", + ih); + return 0; + } prev_location = ih_location(ih); } diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index 85b5151911cfd0..4856706fbfeb45 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -21,61 +21,61 @@ }) /* acceptable for old filesystems */ -static inline bool old_valid_dev(dev_t dev) +static __always_inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } -static inline u16 old_encode_dev(dev_t dev) +static __always_inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } -static inline dev_t old_decode_dev(u16 val) +static __always_inline dev_t old_decode_dev(u16 val) { 
return MKDEV((val >> 8) & 255, val & 255); } -static inline u32 new_encode_dev(dev_t dev) +static __always_inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } -static inline dev_t new_decode_dev(u32 dev) +static __always_inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } -static inline u64 huge_encode_dev(dev_t dev) +static __always_inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } -static inline dev_t huge_decode_dev(u64 dev) +static __always_inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } -static inline int sysv_valid_dev(dev_t dev) +static __always_inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } -static inline u32 sysv_encode_dev(dev_t dev) +static __always_inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } -static inline unsigned sysv_major(u32 dev) +static __always_inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } -static inline unsigned sysv_minor(u32 dev) +static __always_inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } diff --git a/include/linux/mm.h b/include/linux/mm.h index db6ae4d3fb4edc..cd5c313729ea1d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2439,8 +2439,9 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum meminit_context, struct vmem_altmap *, int migratetype); +extern void memmap_init_zone(unsigned long, int, unsigned long, + unsigned long, unsigned long, enum meminit_context, + struct vmem_altmap *, int migratetype); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 25e3fde8561781..4c715be4871719 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) * lock for reading */ extern void down_read(struct rw_semaphore *sem); +extern int __must_check down_read_interruptible(struct rw_semaphore *sem); extern int __must_check down_read_killable(struct rw_semaphore *sem); /* @@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * See Documentation/locking/lockdep-design.rst for more details.) 
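/*
 * Aside - illustrative only. The fs/quota/quota_tree.c hunks above fix a
 * 32-bit shift overflow: blk is a uint, so "blk << dqi_blocksize_bits" is
 * evaluated in 32 bits and wraps before it is ever widened to loff_t;
 * casting blk to loff_t first keeps the full offset. A standalone userspace
 * demonstration of the difference (the values are made up):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blk = 5000000;	/* hypothetical tree block number */
	unsigned int bits = 10;		/* 1 KiB tree blocks */

	int64_t wrapped = blk << bits;		/* 32-bit shift: 825032704 */
	int64_t correct = (int64_t)blk << bits;	/* widened first: 5120000000 */

	printf("wrapped=%lld correct=%lld\n",
	       (long long)wrapped, (long long)correct);
	return 0;
}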
*/ extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); @@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem); extern void up_read_non_owner(struct rw_semaphore *sem); #else # define down_read_nested(sem, subclass) down_read(sem) +# define down_read_killable_nested(sem, subclass) down_read_killable(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) # define down_write_killable_nested(sem, subclass) down_write_killable(sem) diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 1bad18a1d8ba71..4b6a8234d7fc23 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -228,12 +228,13 @@ struct signal_struct { * credential calculations * (notably. ptrace) * Deprecated do not use in new code. - * Use exec_update_mutex instead. - */ - struct mutex exec_update_mutex; /* Held while task_struct is being - * updated during exec, and may have - * inconsistent permissions. + * Use exec_update_lock instead. */ + struct rw_semaphore exec_update_lock; /* Held while task_struct is + * being updated during exec, + * and may have inconsistent + * permissions. + */ } __randomize_layout; /* diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9bf6c319a670e2..65771bef5e654a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3943,6 +3943,16 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) -ENOSYS; } +/* + * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to + * NULL. This causes the ib_dma* helpers to just stash the kernel virtual + * address into the dma address. 
+ */ +static inline bool ib_uses_virt_dma(struct ib_device *dev) +{ + return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; +} + /** * ib_dma_mapping_error - check a DMA addr for error * @dev: The device for which the dma_addr was created @@ -3950,6 +3960,8 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { + if (ib_uses_virt_dma(dev)) + return 0; return dma_mapping_error(dev->dma_device, dma_addr); } @@ -3964,6 +3976,8 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { + if (ib_uses_virt_dma(dev)) + return (uintptr_t)cpu_addr; return dma_map_single(dev->dma_device, cpu_addr, size, direction); } @@ -3978,7 +3992,8 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_single(dev->dma_device, addr, size, direction); + if (!ib_uses_virt_dma(dev)) + dma_unmap_single(dev->dma_device, addr, size, direction); } /** @@ -3995,6 +4010,8 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { + if (ib_uses_virt_dma(dev)) + return (uintptr_t)(page_address(page) + offset); return dma_map_page(dev->dma_device, page, offset, size, direction); } @@ -4009,7 +4026,30 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - dma_unmap_page(dev->dma_device, addr, size, direction); + if (!ib_uses_virt_dma(dev)) + dma_unmap_page(dev->dma_device, addr, size, direction); +} + +int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents); +static inline int ib_dma_map_sg_attrs(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction, + unsigned long dma_attrs) +{ + if (ib_uses_virt_dma(dev)) + return ib_dma_virt_map_sg(dev, sg, nents); + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, + dma_attrs); +} + +static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction, + unsigned long dma_attrs) +{ + if (!ib_uses_virt_dma(dev)) + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, + dma_attrs); } /** @@ -4023,7 +4063,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - return dma_map_sg(dev->dma_device, sg, nents, direction); + return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0); } /** @@ -4037,24 +4077,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - dma_unmap_sg(dev->dma_device, sg, nents, direction); -} - -static inline int ib_dma_map_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); -} - -static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); + ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0); } /** @@ -4065,6 +4088,8 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, */ static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) { 
+ if (ib_uses_virt_dma(dev)) + return UINT_MAX; return dma_get_max_seg_size(dev->dma_device); } @@ -4080,7 +4105,8 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + if (!ib_uses_virt_dma(dev)) + dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); } /** @@ -4095,7 +4121,8 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + if (!ib_uses_virt_dma(dev)) + dma_sync_single_for_device(dev->dma_device, addr, size, dir); } /** diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h index 5ed721ad5b1985..af2a44c08683de 100644 --- a/include/uapi/linux/const.h +++ b/include/uapi/linux/const.h @@ -28,4 +28,9 @@ #define _BITUL(x) (_UL(1) << (x)) #define _BITULL(x) (_ULL(1) << (x)) +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) + +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + #endif /* _UAPI_LINUX_CONST_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9ca87bc73c447c..cde753bb209356 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -14,7 +14,7 @@ #ifndef _UAPI_LINUX_ETHTOOL_H #define _UAPI_LINUX_ETHTOOL_H -#include +#include #include #include diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h index 0ff8f7477847ce..fadf2db71fe8a4 100644 --- a/include/uapi/linux/kernel.h +++ b/include/uapi/linux/kernel.h @@ -3,13 +3,6 @@ #define _UAPI_LINUX_KERNEL_H #include - -/* - * 'kernel.h' contains some often-used function prototypes etc - */ -#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) -#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) - -#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#include #endif /* _UAPI_LINUX_KERNEL_H */ diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index f9a1be7fc6962e..ead2e72e5c88ea 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -21,7 +21,7 @@ #define _UAPI_LINUX_LIGHTNVM_H #ifdef __KERNEL__ -#include +#include #include #else /* __KERNEL__ */ #include diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index c36177a86516ee..a1fd6173e2dbe9 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -2,7 +2,7 @@ #ifndef _UAPI__LINUX_MROUTE6_H #define _UAPI__LINUX_MROUTE6_H -#include +#include #include #include #include /* For struct sockaddr_in6. 
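/*
 * Aside - illustrative only. The helpers that include/uapi/linux/const.h now
 * carries (moved out of the uapi kernel.h above) are the usual power-of-two
 * round-up macros. What they compute, shown standalone with the definitions
 * copied in from the hunk:
 */
#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long len = 4100;

	/* Round 4100 up to a 4 KiB boundary -> 8192 */
	printf("%lu\n", __ALIGN_KERNEL(len, 4096UL));
	/* Number of 4 KiB pages needed for 4100 bytes -> 2 */
	printf("%lu\n", __KERNEL_DIV_ROUND_UP(len, 4096UL));
	return 0;
}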
*/ diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h index a8283f7dbc5191..b8c6bb233ac1c1 100644 --- a/include/uapi/linux/netfilter/x_tables.h +++ b/include/uapi/linux/netfilter/x_tables.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_X_TABLES_H #define _UAPI_X_TABLES_H -#include +#include #include #define XT_FUNCTION_MAXNAMELEN 30 diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index c3816ff7bfc32f..3d94269bbfa87c 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -2,7 +2,7 @@ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H -#include +#include #include /* for __kernel_sa_family_t */ #include diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 27c1ed2822e698..458179df9b2719 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -23,7 +23,7 @@ #ifndef _UAPI_LINUX_SYSCTL_H #define _UAPI_LINUX_SYSCTL_H -#include +#include #include #include diff --git a/init/init_task.c b/init/init_task.c index a56f0abb63e934..15f6eb93a04fa2 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -26,7 +26,7 @@ static struct signal_struct init_signals = { .multiprocess = HLIST_HEAD_INIT, .rlim = INIT_RLIMITS, .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex), - .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex), + .exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock), #ifdef CONFIG_POSIX_TIMERS .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers), .cputimer = { diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 191c329e482ad9..32596fdbcd5b8e 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -908,6 +908,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) opt = fs_parse(fc, cgroup1_fs_parameters, param, &result); if (opt == -ENOPARAM) { if (strcmp(param->key, "source") == 0) { + if (fc->source) + return invalf(fc, "Multiple sources not supported"); fc->source = param->string; param->string = NULL; return 0; diff --git a/kernel/events/core.c b/kernel/events/core.c index dc568ca295bdc1..c3ba29d058b731 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1325,7 +1325,7 @@ static void put_ctx(struct perf_event_context *ctx) * function. * * Lock order: - * exec_update_mutex + * exec_update_lock * task_struct::perf_event_mutex * perf_event_context::mutex * perf_event::child_mutex; @@ -11720,24 +11720,6 @@ SYSCALL_DEFINE5(perf_event_open, goto err_task; } - if (task) { - err = mutex_lock_interruptible(&task->signal->exec_update_mutex); - if (err) - goto err_task; - - /* - * Preserve ptrace permission check for backwards compatibility. - * - * We must hold exec_update_mutex across this and any potential - * perf_install_in_context() call for this new event to - * serialize against exec() altering our credentials (and the - * perf_event_exit_task() that could imply). 
- */ - err = -EACCES; - if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) - goto err_cred; - } - if (flags & PERF_FLAG_PID_CGROUP) cgroup_fd = pid; @@ -11745,7 +11727,7 @@ SYSCALL_DEFINE5(perf_event_open, NULL, NULL, cgroup_fd); if (IS_ERR(event)) { err = PTR_ERR(event); - goto err_cred; + goto err_task; } if (is_sampling_event(event)) { @@ -11864,6 +11846,24 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; } + if (task) { + err = down_read_interruptible(&task->signal->exec_update_lock); + if (err) + goto err_file; + + /* + * Preserve ptrace permission check for backwards compatibility. + * + * We must hold exec_update_lock across this and any potential + * perf_install_in_context() call for this new event to + * serialize against exec() altering our credentials (and the + * perf_event_exit_task() that could imply). + */ + err = -EACCES; + if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + goto err_cred; + } + if (move_group) { gctx = __perf_event_ctx_lock_double(group_leader, ctx); @@ -12017,7 +12017,7 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&ctx->mutex); if (task) { - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); put_task_struct(task); } @@ -12039,7 +12039,10 @@ SYSCALL_DEFINE5(perf_event_open, if (move_group) perf_event_ctx_unlock(group_leader, gctx); mutex_unlock(&ctx->mutex); -/* err_file: */ +err_cred: + if (task) + up_read(&task->signal->exec_update_lock); +err_file: fput(event_file); err_context: perf_unpin_context(ctx); @@ -12051,9 +12054,6 @@ SYSCALL_DEFINE5(perf_event_open, */ if (!event_file) free_event(event); -err_cred: - if (task) - mutex_unlock(&task->signal->exec_update_mutex); err_task: if (task) put_task_struct(task); @@ -12358,7 +12358,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) /* * When a child task exits, feed back event values to parent events. * - * Can be called with exec_update_mutex held when called from + * Can be called with exec_update_lock held when called from * setup_new_exec(). 
*/ void perf_event_exit_task(struct task_struct *child) diff --git a/kernel/fork.c b/kernel/fork.c index dc55f68a6ee36d..c675fdbd3dce13 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1222,7 +1222,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) struct mm_struct *mm; int err; - err = mutex_lock_killable(&task->signal->exec_update_mutex); + err = down_read_killable(&task->signal->exec_update_lock); if (err) return ERR_PTR(err); @@ -1232,7 +1232,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) mmput(mm); mm = ERR_PTR(-EACCES); } - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); return mm; } @@ -1592,7 +1592,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_init(&sig->cred_guard_mutex); - mutex_init(&sig->exec_update_mutex); + init_rwsem(&sig->exec_update_lock); return 0; } diff --git a/kernel/kcmp.c b/kernel/kcmp.c index b3ff9288c6cc91..c0d2ad9b4705d8 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c @@ -75,25 +75,25 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx) return file; } -static void kcmp_unlock(struct mutex *m1, struct mutex *m2) +static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2) { - if (likely(m2 != m1)) - mutex_unlock(m2); - mutex_unlock(m1); + if (likely(l2 != l1)) + up_read(l2); + up_read(l1); } -static int kcmp_lock(struct mutex *m1, struct mutex *m2) +static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2) { int err; - if (m2 > m1) - swap(m1, m2); + if (l2 > l1) + swap(l1, l2); - err = mutex_lock_killable(m1); - if (!err && likely(m1 != m2)) { - err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING); + err = down_read_killable(l1); + if (!err && likely(l1 != l2)) { + err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING); if (err) - mutex_unlock(m1); + up_read(l1); } return err; @@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, /* * One should have enough rights to inspect task details. 
*/ - ret = kcmp_lock(&task1->signal->exec_update_mutex, - &task2->signal->exec_update_mutex); + ret = kcmp_lock(&task1->signal->exec_update_lock, + &task2->signal->exec_update_lock); if (ret) goto err; if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || @@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, } err_unlock: - kcmp_unlock(&task1->signal->exec_update_mutex, - &task2->signal->exec_update_mutex); + kcmp_unlock(&task1->signal->exec_update_lock, + &task2->signal->exec_update_lock); err: put_task_struct(task1); put_task_struct(task2); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index f11b9bd3431d28..a163542d178ee1 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1345,6 +1345,18 @@ static inline void __down_read(struct rw_semaphore *sem) } } +static inline int __down_read_interruptible(struct rw_semaphore *sem) +{ + if (!rwsem_read_trylock(sem)) { + if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE))) + return -EINTR; + DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); + } else { + rwsem_set_reader_owned(sem); + } + return 0; +} + static inline int __down_read_killable(struct rw_semaphore *sem) { if (!rwsem_read_trylock(sem)) { @@ -1495,6 +1507,20 @@ void __sched down_read(struct rw_semaphore *sem) } EXPORT_SYMBOL(down_read); +int __sched down_read_interruptible(struct rw_semaphore *sem) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); + + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) { + rwsem_release(&sem->dep_map, _RET_IP_); + return -EINTR; + } + + return 0; +} +EXPORT_SYMBOL(down_read_interruptible); + int __sched down_read_killable(struct rw_semaphore *sem) { might_sleep(); @@ -1605,6 +1631,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass) } EXPORT_SYMBOL(down_read_nested); +int down_read_killable_nested(struct rw_semaphore *sem, int subclass) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); + + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { + rwsem_release(&sem->dep_map, _RET_IP_); + return -EINTR; + } + + return 0; +} +EXPORT_SYMBOL(down_read_killable_nested); + void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) { might_sleep(); diff --git a/kernel/module.c b/kernel/module.c index a4fa44a652a757..e20499309b2af6 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1895,7 +1895,6 @@ static int mod_sysfs_init(struct module *mod) if (err) mod_kobject_put(mod); - /* delay uevent until full sysfs population */ out: return err; } @@ -1932,7 +1931,6 @@ static int mod_sysfs_setup(struct module *mod, add_sect_attrs(mod, info); add_notes_attrs(mod, info); - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); return 0; out_unreg_modinfo_attrs: @@ -3639,6 +3637,9 @@ static noinline int do_init_module(struct module *mod) blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); + /* Delay uevent until module has finished its init routine */ + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + /* * We need to finish all async code before the module init sequence * is done. This has potential to deadlock. 
For example, a newly @@ -3991,6 +3992,7 @@ static int load_module(struct load_info *info, const char __user *uargs, MODULE_STATE_GOING, mod); klp_module_going(mod); bug_cleanup: + mod->state = MODULE_STATE_GOING; /* module_bug_cleanup needs module_mutex protection */ mutex_lock(&module_mutex); module_bug_cleanup(mod); diff --git a/kernel/pid.c b/kernel/pid.c index a96bc4bf4f8698..4856818c9de1ae 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -628,7 +628,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd) struct file *file; int ret; - ret = mutex_lock_killable(&task->signal->exec_update_mutex); + ret = down_read_killable(&task->signal->exec_update_lock); if (ret) return ERR_PTR(ret); @@ -637,7 +637,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd) else file = ERR_PTR(-EPERM); - mutex_unlock(&task->signal->exec_update_mutex); + up_read(&task->signal->exec_update_lock); return file ?: ERR_PTR(-EBADF); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 81632cd5e3b729..e8d351b7f9b033 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -941,13 +941,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) */ if (tick_do_timer_cpu == cpu) return false; - /* - * Boot safety: make sure the timekeeping duty has been - * assigned before entering dyntick-idle mode, - * tick_do_timer_cpu is TICK_DO_TIMER_BOOT - */ - if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT)) - return false; /* Should not happen for nohz-full */ if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile index 8e4d5afbbb1094..66e1c96387c407 100644 --- a/lib/zlib_dfltcc/Makefile +++ b/lib/zlib_dfltcc/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o -zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o +zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c index c30de430b30ca6..782f76e9d4dab1 100644 --- a/lib/zlib_dfltcc/dfltcc.c +++ b/lib/zlib_dfltcc/dfltcc.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: Zlib /* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */ -#include +#include +#include #include "dfltcc_util.h" #include "dfltcc.h" @@ -53,3 +54,6 @@ void dfltcc_reset( dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE; dfltcc_state->param.ribm = DFLTCC_RIBM; } +EXPORT_SYMBOL(dfltcc_reset); + +MODULE_LICENSE("GPL"); diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c index 00c185101c6d14..6c946e8532eec6 100644 --- a/lib/zlib_dfltcc/dfltcc_deflate.c +++ b/lib/zlib_dfltcc/dfltcc_deflate.c @@ -4,6 +4,7 @@ #include "dfltcc_util.h" #include "dfltcc.h" #include +#include #include /* @@ -34,6 +35,7 @@ int dfltcc_can_deflate( return 1; } +EXPORT_SYMBOL(dfltcc_can_deflate); static void dfltcc_gdht( z_streamp strm @@ -277,3 +279,4 @@ int dfltcc_deflate( goto again; /* deflate() must use all input or all output */ return 1; } +EXPORT_SYMBOL(dfltcc_deflate); diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c index db107016d29b32..fb60b5a6a1cb67 100644 --- a/lib/zlib_dfltcc/dfltcc_inflate.c +++ b/lib/zlib_dfltcc/dfltcc_inflate.c @@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate( param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1); if (param->hl) param->nt = 0; /* Honor history for the first block */ - param->cv = state->flags ? 
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
index db107016d29b32..fb60b5a6a1cb67 100644
--- a/lib/zlib_dfltcc/dfltcc_inflate.c
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
     param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
     if (param->hl)
         param->nt = 0; /* Honor history for the first block */
-    param->cv = state->flags ? REVERSE(state->check) : state->check;
+    param->cv = state->check;
 
     /* Inflate */
     do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
     state->bits = param->sbb;
     state->whave = param->hl;
     state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
-    state->check = state->flags ? REVERSE(param->cv) : param->cv;
+    state->check = param->cv;
     if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
         /* Report an error if stream is corrupted */
         state->mode = BAD;
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
deleted file mode 100644
index 6f23481804c1d8..00000000000000
--- a/lib/zlib_dfltcc/dfltcc_syms.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3b38ea958e9541..1fd11f96a707ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4106,10 +4106,30 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * may get SIGKILLed if it later faults.
 	 */
 	if (outside_reserve) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		pgoff_t idx;
+		u32 hash;
+
 		put_page(old_page);
 		BUG_ON(huge_pte_none(pte));
+		/*
+		 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+		 * unmapping.  unmapping needs to hold i_mmap_rwsem
+		 * in write mode.  Dropping i_mmap_rwsem in read mode
+		 * here is OK as COW mappings do not interact with
+		 * PMD sharing.
+		 *
+		 * Reacquire both after unmap operation.
+		 */
+		idx = vma_hugecache_offset(h, vma, haddr);
+		hash = hugetlb_fault_mutex_hash(mapping, idx);
+		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
+
 		unmap_ref_private(mm, vma, old_page, haddr);
-		BUG_ON(huge_pte_none(pte));
+
+		i_mmap_lock_read(mapping);
+		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		spin_lock(ptl);
 		ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 		if (likely(ptep &&
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0f855deea4b2d4..aa453a43314372 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -714,7 +714,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	 * expects the zone spans the pfn range. All the pages in the range
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
 			 MEMINIT_HOTPLUG, altmap, migratetype);
 
 	set_zone_contiguous(zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32f783ddb5c3a6..14b9e83ff9da2d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -448,6 +448,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 		return false;
 
+	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+		return true;
 	/*
 	 * We start only with one section of pages, more pages are added as
 	 * needed until the rest of deferred pages are initialized.
@@ -6050,7 +6052,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn,
+		unsigned long start_pfn, unsigned long zone_end_pfn,
 		enum meminit_context context,
 		struct vmem_altmap *altmap, int migratetype)
 {
@@ -6086,7 +6088,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context == MEMINIT_EARLY) {
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
-			if (defer_init(nid, pfn, end_pfn))
+			if (defer_init(nid, pfn, zone_end_pfn))
 				break;
 		}
 
@@ -6200,7 +6202,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 
 		if (end_pfn > start_pfn) {
 			size = end_pfn - start_pfn;
-			memmap_init_zone(size, nid, zone, start_pfn,
+			memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
 					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 		}
 	}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 502552d6e9aff3..c4aa2cbb926974 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -763,7 +763,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
 	}
 
-	if (hdev->commands[35] & 0x40) {
+	if (hdev->commands[35] & 0x04) {
 		__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
 
 		/* Set RPA timeout */
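The hci_core.c hunk above tests a different bit of the HCI "Supported Commands" bitmap: 0x40 is bit 6 of octet 35, while the corrected mask 0x04 is bit 2. A small sketch of the octet/bit test, with a hypothetical helper name (the octet/bit assignments themselves come from the Bluetooth core specification):

#include <stdbool.h>
#include <stdint.h>

/* Test one bit of a supported-commands bitmap: pick the octet, then the
 * bit inside it.  A mask of 0x04 selects bit 2, 0x40 selects bit 6. */
static bool hci_command_bit(const uint8_t *commands, unsigned int octet,
			    unsigned int bit)
{
	return commands[octet] & (1u << bit);
}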
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 5635604cb9ba1e..25a9e566ef5cdd 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -194,8 +194,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
 	if (netif_is_rxfh_configured(dev) &&
 	    !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
 	    (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+		ret = -EINVAL;
 		GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
-		return -EINVAL;
+		goto out_ops;
 	}
 
 	/* Disabling channels, query zero-copy AF_XDP sockets */
@@ -203,8 +204,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
 		       min(channels.rx_count, channels.tx_count);
 	for (i = from_channel; i < old_total; i++)
 		if (xsk_get_pool_from_qid(dev, i)) {
+			ret = -EINVAL;
 			GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
-			return -EINVAL;
+			goto out_ops;
 		}
 
 	ret = dev->ethtool_ops->set_channels(dev, &channels);
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 0baad0ce183282..c3a5489964cdeb 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -182,7 +182,7 @@ static int strset_parse_request(struct ethnl_req_info *req_base,
 		ret = strset_get_id(attr, &id, extack);
 		if (ret < 0)
 			return ret;
-		if (ret >= ETH_SS_COUNT) {
+		if (id >= ETH_SS_COUNT) {
 			NL_SET_ERR_MSG_ATTR(extack, attr,
 					    "unknown string set id");
 			return -EOPNOTSUPP;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 88f2a7a0ccb86b..967ce9ccfc0da3 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2081,6 +2081,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 	sock_reset_flag(nsk, SOCK_RCU_FREE);
 	/* will be fully established after successful MPC subflow creation */
 	inet_sk_state_store(nsk, TCP_SYN_RECV);
+
+	security_inet_csk_clone(nsk, req);
 	bh_unlock_sock(nsk);
 
 	/* keep a single reference */
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b0ad7687ee2c85..c6653ee7f701bd 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1596,6 +1596,21 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	return err;
 }
 
+static void taprio_reset(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	int i;
+
+	hrtimer_cancel(&q->advance_timer);
+	if (q->qdiscs) {
+		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+			qdisc_reset(q->qdiscs[i]);
+	}
+	sch->qstats.backlog = 0;
+	sch->q.qlen = 0;
+}
+
 static void taprio_destroy(struct Qdisc *sch)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
@@ -1606,7 +1621,6 @@ static void taprio_destroy(struct Qdisc *sch)
 	list_del(&q->taprio_list);
 	spin_unlock(&taprio_list_lock);
 
-	hrtimer_cancel(&q->advance_timer);
 
 	taprio_disable_offload(dev, q, NULL);
 
@@ -1953,6 +1967,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
 	.init		= taprio_init,
 	.change		= taprio_change,
 	.destroy	= taprio_destroy,
+	.reset		= taprio_reset,
 	.peek		= taprio_peek,
 	.dequeue	= taprio_dequeue,
 	.enqueue	= taprio_enqueue,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 47b155a49226f4..9f3f8e953ff04e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -755,8 +755,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 		runtime->boundary *= 2;
 
 	/* clear the buffer for avoiding possible kernel info leaks */
-	if (runtime->dma_area && !substream->ops->copy_user)
-		memset(runtime->dma_area, 0, runtime->dma_bytes);
+	if (runtime->dma_area && !substream->ops->copy_user) {
+		size_t size = runtime->dma_bytes;
+
+		if (runtime->info & SNDRV_PCM_INFO_MMAP)
+			size = PAGE_ALIGN(size);
+		memset(runtime->dma_area, 0, size);
+	}
 
 	snd_pcm_timer_resolution_change(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
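The pcm_native.c hunk above clears the page-aligned size when the buffer is mmap-able, since an mmap() of the buffer is page-granular and user space can read the whole final page, not just runtime->dma_bytes. A worked example of the rounding, assuming 4 KiB pages (illustrative userspace C, not kernel code):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long dma_bytes = 6000;	/* hypothetical buffer size */

	/* 6000 bytes span two 4 KiB pages, so the mappable area is 8192
	 * bytes; the 2192-byte tail must be zeroed as well. */
	printf("dma_bytes=%lu, cleared=%lu\n", dma_bytes, PAGE_ALIGN(dma_bytes));
	return 0;
}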
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index c78720a3299c42..257ad5206240fd 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -95,11 +95,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
 	}
 }
 
-static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
+static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
+{
+	return runtime->avail >= runtime->avail_min;
+}
+
+static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
 {
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long flags;
+	bool ready;
 
-	return runtime->avail >= runtime->avail_min;
+	spin_lock_irqsave(&runtime->lock, flags);
+	ready = __snd_rawmidi_ready(runtime);
+	spin_unlock_irqrestore(&runtime->lock, flags);
+	return ready;
 }
 
 static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
@@ -1019,7 +1029,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
 	if (result > 0) {
 		if (runtime->event)
 			schedule_work(&runtime->event_work);
-		else if (snd_rawmidi_ready(substream))
+		else if (__snd_rawmidi_ready(runtime))
 			wake_up(&runtime->sleep);
 	}
 	spin_unlock_irqrestore(&runtime->lock, flags);
@@ -1098,7 +1108,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 	result = 0;
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
-		while (!snd_rawmidi_ready(substream)) {
+		while (!__snd_rawmidi_ready(runtime)) {
 			wait_queue_entry_t wait;
 
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
@@ -1115,9 +1125,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 				return -ENODEV;
 			if (signal_pending(current))
 				return result > 0 ? result : -ERESTARTSYS;
-			if (!runtime->avail)
-				return result > 0 ? result : -EIO;
 			spin_lock_irq(&runtime->lock);
+			if (!runtime->avail) {
+				spin_unlock_irq(&runtime->lock);
+				return result > 0 ? result : -EIO;
+			}
 		}
 		spin_unlock_irq(&runtime->lock);
 		count1 = snd_rawmidi_kernel_read1(substream,
@@ -1255,7 +1267,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
 	runtime->avail += count;
 	substream->bytes += count;
 	if (count > 0) {
-		if (runtime->drain || snd_rawmidi_ready(substream))
+		if (runtime->drain || __snd_rawmidi_ready(runtime))
 			wake_up(&runtime->sleep);
 	}
 	return count;
@@ -1444,9 +1456,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 				return -ENODEV;
 			if (signal_pending(current))
 				return result > 0 ? result : -ERESTARTSYS;
-			if (!runtime->avail && !timeout)
-				return result > 0 ? result : -EIO;
 			spin_lock_irq(&runtime->lock);
+			if (!runtime->avail && !timeout) {
+				spin_unlock_irq(&runtime->lock);
+				return result > 0 ? result : -EIO;
+			}
 		}
 		spin_unlock_irq(&runtime->lock);
 		count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
@@ -1526,6 +1540,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_substream *substream;
 	struct snd_rawmidi_runtime *runtime;
+	unsigned long buffer_size, avail, xruns;
 
 	rmidi = entry->private_data;
 	snd_iprintf(buffer, "%s\n\n", rmidi->name);
@@ -1544,13 +1559,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 				    "  Owner PID    : %d\n",
 				    pid_vnr(substream->pid));
 			runtime = substream->runtime;
+			spin_lock_irq(&runtime->lock);
+			buffer_size = runtime->buffer_size;
+			avail = runtime->avail;
+			spin_unlock_irq(&runtime->lock);
 			snd_iprintf(buffer,
 				    "  Mode         : %s\n"
 				    "  Buffer size  : %lu\n"
 				    "  Avail        : %lu\n",
 				    runtime->oss ? "OSS compatible" : "native",
-				    (unsigned long) runtime->buffer_size,
-				    (unsigned long) runtime->avail);
+				    buffer_size, avail);
 		}
 	}
 }
@@ -1568,13 +1586,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 				    "  Owner PID    : %d\n",
 				    pid_vnr(substream->pid));
 			runtime = substream->runtime;
+			spin_lock_irq(&runtime->lock);
+			buffer_size = runtime->buffer_size;
+			avail = runtime->avail;
+			xruns = runtime->xruns;
+			spin_unlock_irq(&runtime->lock);
 			snd_iprintf(buffer,
 				    "  Buffer size  : %lu\n"
 				    "  Avail        : %lu\n"
 				    "  Overruns     : %lu\n",
-				    (unsigned long) runtime->buffer_size,
-				    (unsigned long) runtime->avail,
-				    (unsigned long) runtime->xruns);
+				    buffer_size, avail, xruns);
 		}
 	}
 }
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 9254c8dbe5e379..25d2d6b6100791 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -26,10 +26,10 @@ struct snd_seq_queue {
 
 	struct snd_seq_timer *timer;	/* time keeper for this queue */
 	int	owner;		/* client that 'owns' the timer */
-	unsigned int	locked:1,	/* timer is only accesibble by owner if set */
-		klocked:1,	/* kernel lock (after START) */
-		check_again:1,
-		check_blocked:1;
+	bool locked;		/* timer is only accesibble by owner if set */
+	bool klocked;		/* kernel lock (after START) */
+	bool check_again;	/* concurrent access happened during check */
+	bool check_blocked;	/* queue being checked */
 
 	unsigned int flags;		/* status flags */
 	unsigned int info_flags;	/* info for sync */
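The seq_queue.h hunk above turns four adjacent 1-bit bitfields into separate bools. Bitfields that share a storage unit are updated with a read-modify-write of the whole unit, so two contexts flipping different flags can lose each other's update unless they hold a common lock; separate bools give each flag its own byte. A reduced sketch of the difference, with hypothetical struct names:

#include <stdbool.h>

/* Packed: both flags live in one storage unit, so "p->a = true" is a
 * read-modify-write of the whole unit and can overwrite a concurrent
 * update of p->b made from another context. */
struct packed_flags {
	unsigned int a:1;
	unsigned int b:1;
};

/* Split: each flag has its own byte, so independent stores do not touch
 * the neighbouring flag (a lock is still needed when a flag is read and
 * written together with other state). */
struct split_flags {
	bool a;
	bool b;
};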
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2ddc27db8c0128..d12b4799c3cb76 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1736,7 +1736,7 @@ static void silent_stream_disable(struct hda_codec *codec,
 	per_pin->silent_stream = false;
 
  unlock_out:
-	mutex_unlock(&spec->pcm_lock);
+	mutex_unlock(&per_pin->lock);
 }
 
 /* update ELD and jack state via audio component */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dde5ba20954157..006af6541dada8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7885,7 +7885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
-	SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index 5ed721ad5b1985..af2a44c08683de 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
 #define _BITUL(x)	(_UL(1) << (x))
 #define _BITULL(x)	(_ULL(1) << (x))
 
+#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
 #endif /* _UAPI_LINUX_CONST_H */
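For reference, the two helpers added to the tools copy of const.h round a value up to a multiple and divide with rounding up, respectively. A tiny worked example using the definitions just added (illustrative userspace C, relies on the GCC typeof extension):

#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* (10 + 7) & ~7 = 16: round 10 up to the next multiple of 8. */
	printf("%lu\n", __ALIGN_KERNEL(10UL, 8UL));
	/* (10 + 4 - 1) / 4 = 3: ceiling of 10 / 4. */
	printf("%lu\n", __KERNEL_DIV_ROUND_UP(10UL, 4UL));
	return 0;
}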